# Exploratory inspection of the fitted GEE objects (fit2 on the full
# ChickWeight data, fit2_sub on a subsample; both fitted elsewhere in
# this session with geepack::geeglm).
fit2[[1]]
fit2[[2]]
coef(fit2)
aux <- summary(fit2)
aux$terms
# vcov() returns the variance-covariance matrix of the coefficient
# estimates; the square root of its diagonal gives the standard errors.
# (Removed broken calls from the session: cov()/acov() have no geeglm
# methods, and ?vcov is a help lookup that does not belong in a script.)
vcov(fit2)
sqrt(diag(vcov(fit2)))
summary(fit2)
# Standard errors side by side: full data vs. subsample.
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))))
# Refit on the subsample with the 'fij' (fully iterated jackknife — see
# ?geeglm) variance estimator.
fit3_sub <- geeglm(weight ~ Time, id = Chick, data = chick_sub, std.err = "fij")
summary(fit3_sub)
fit3_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub, std.err = "fij")
summary(fit3_sub)
# Standard errors: full data, subsample (sandwich), subsample (fij).
cbind(sqrt(diag(vcov(fit2))),
      sqrt(diag(vcov(fit2_sub))),
      sqrt(diag(vcov(fit3_sub))))
aux$terms
aux
aux$coefficients
aux$coefficients[, 4]  # column 4 of the coefficient table holds the p-values
# p-values side by side for the three fits.
round(cbind(summary(fit2)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4],
            summary(fit3_sub)$coefficients[, 4]), 4)
# Random subsampling experiment: draw rows without replacement from the
# ChickWeight copy (chick1) and refit the GEE models to see how standard
# errors and p-values change with sample size.
n <- nrow(chick1)
n
# First draw: 50 rows (overwritten by the 90-row draw just below).
chick_sub <- chick1[sample(1:n, 50, replace = FALSE),]
table(chick_sub$Diet)
n <- nrow(chick1)
# Second draw: 90 rows — this is the subsample used in the fits below.
chick_sub <- chick1[sample(1:n, 90, replace = FALSE),]
table(chick_sub$Diet)
table(chick_sub$Diet)
fit1_sub <- geeglm(weight~Time, id=Chick, data=chick_sub)
summary(fit1_sub)
fit2_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub)
summary(fit2_sub)
summary(fit2)
# Standard errors: full data vs. 90-row subsample.
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))))
# Subsample refit with the 'fij' variance estimator (see ?geeglm).
fit3_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub, std.err = 'fij')
summary(fit3_sub)
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))), sqrt(diag(vcov(fit3_sub))))
# p-values (column 4) for the three fits, rounded.
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4],
summary(fit3_sub)$coefficients[,4]), 4)
summary(fit2)
?ChickWeight
# install.packages("ClusterBootstrap")  # run once, then keep commented out
library(ClusterBootstrap)
# Cluster bootstrap GLM: resamples whole clusters to obtain bootstrap
# standard errors and percentile confidence intervals for the coefficients.
# NOTE(review): clusterid = Diet resamples the 4 diet groups, not the
# chicks — confirm this is the intended clustering unit.
fit4 <- clusbootglm(
  weight ~ Time * Diet, data = chick_sub,
  clusterid = Diet,
  family = gaussian,
  B = 1000,            # B = 5000 was also tried in the session; 1000 kept
  confint.level = 0.95,
  n.cores = 1
)
fit4
fit4$ci.level
fit4$percentile.interval
# GEE p-values for comparison with the bootstrap intervals.
round(cbind(summary(fit2)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4],
            summary(fit3_sub)$coefficients[, 4]), 4)
summary(fit2)
summary(fit2_sub)
summary(fit3_sub)
library(geepack)
set.seed(1)
# Notice: geeglm assumes clusters of observations appear as contiguous
# chunks in the data (rows with the same id must be adjacent).
chick1 <- ChickWeight
View(chick1)
View(ChickWeight)
table(ChickWeight$Diet)
# GEE fits on the full ChickWeight data; id = Chick defines the clusters.
fit1 <- geeglm(weight ~ Time, id = Chick, data = ChickWeight)
summary(fit1)
fit2 <- geeglm(weight ~ Time * Diet, id = Chick, data = ChickWeight)
summary(fit2)
n <- nrow(ChickWeight)
# Random subsample of size 50 from the data.
chick_sub <- ChickWeight[sample(seq_len(n), 50, replace = FALSE), ]
table(chick_sub$Diet)
fit1_sub <- geeglm(weight ~ Time, id = Chick, data = chick_sub)
summary(fit1_sub)
fit2_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub)
summary(fit2_sub)
# Standard errors differ between full data and subsample.
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))))
# p-values differ as well (column 4 of the coefficient table).
round(cbind(summary(fit2)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4]), 4)
# Refit the subsample with alternative variance estimators (see ?geeglm):
# 'fij' = fully iterated jackknife, 'jack' = approximate jackknife.
fit3_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub, std.err = "fij")
summary(fit3_sub)
round(cbind(summary(fit2)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4],
            summary(fit3_sub)$coefficients[, 4]), 4)
fit3_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub, std.err = "jack")
summary(fit3_sub)
round(cbind(summary(fit2)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4],
            summary(fit3_sub)$coefficients[, 4]), 4)
# Random subsample of size 200 (comment previously said 50 — corrected).
chick_sub <- ChickWeight[sample(1:n, 200, replace = FALSE),]
table(chick_sub$Diet)
# Random subsample of size 300 (overwrites the 200-row draw above).
chick_sub <- ChickWeight[sample(1:n, 300, replace = FALSE),]
table(chick_sub$Diet)
fit1_sub <- geeglm(weight~Time, id=Chick, data=chick_sub)
summary(fit1_sub)
fit2_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub)
summary(fit2_sub)
# Standard errors differ (full data vs. subsample).
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))))
# p-values differ.
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4]), 4)
# Subsample refit with the approximate jackknife variance estimator.
fit3_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub, std.err = 'jack')
summary(fit3_sub)
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4],
summary(fit3_sub)$coefficients[,4]), 4)
# Same subsample with the fully iterated jackknife ('fij') estimator.
fit3_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub, std.err = 'fij')
summary(fit3_sub)
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4],
summary(fit3_sub)$coefficients[,4]), 4)
# Random subsample of size 100.
chick_sub <- ChickWeight[sample(1:n, 100, replace = FALSE),]
table(chick_sub$Diet)
fit1_sub <- geeglm(weight~Time, id=Chick, data=chick_sub)
summary(fit1_sub)
fit2_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub)
summary(fit2_sub)
# Standard errors differ.
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))))
# p-values differ.
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4]), 4)
fit3_sub <- geeglm(weight~Time*Diet, id=Chick, data=chick_sub, std.err = 'fij')
summary(fit3_sub)
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4],
summary(fit3_sub)$coefficients[,4]), 4)
cbind(sqrt(diag(vcov(fit2))), sqrt(diag(vcov(fit2_sub))), sqrt(diag(vcov(fit3_sub))))
round(cbind(summary(fit2)$coefficients[,4],
summary(fit2_sub)$coefficients[,4],
summary(fit3_sub)$coefficients[,4]), 4)
# Full-data fits: sandwich vs. jackknife standard errors, with anova tables.
# (Removed failed attempts from the session: geeglm() calls missing the
# required id argument, and View(muscatine) on an undefined object.)
fit1 <- geeglm(weight ~ Time * Diet, id = Chick, data = ChickWeight)
summary(fit1)
anova(fit1)
fit2 <- geeglm(weight ~ Time * Diet, id = Chick, data = ChickWeight, std.err = "jack")
summary(fit2)
anova(fit2)
# Standard errors differ between the two variance estimators.
cbind(sqrt(diag(vcov(fit1))), sqrt(diag(vcov(fit2))))
# Point estimates (column 1) are identical; only the uncertainty changes.
round(cbind(summary(fit1)$coefficients[, 1],
            summary(fit2)$coefficients[, 1]), 4)
# p-values differ.
round(cbind(summary(fit1)$coefficients[, 4],
            summary(fit2)$coefficients[, 4]), 4)
n <- nrow(ChickWeight)
# Random subsample of size 100.
chick_sub <- ChickWeight[sample(seq_len(n), 100, replace = FALSE), ]
table(chick_sub$Diet)
fit1_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub)
summary(fit1_sub)
anova(fit1_sub)
fit2_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub, std.err = "jack")
summary(fit2_sub)
anova(fit2_sub)
# Standard errors differ (sandwich vs. jackknife) on the subsample.
cbind(sqrt(diag(vcov(fit1_sub))), sqrt(diag(vcov(fit2_sub))))
# p-values differ.
round(cbind(summary(fit1_sub)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4]), 4)
# Which coefficients are significant at the 5% level under each fit?
cbind(summary(fit1)$coefficients[, 4],
      summary(fit2)$coefficients[, 4],
      summary(fit1_sub)$coefficients[, 4],
      summary(fit2_sub)$coefficients[, 4]) < 0.05
# Random subsample of size 200, using the 'fij' variance estimator.
chick_sub <- ChickWeight[sample(seq_len(n), 200, replace = FALSE), ]
table(chick_sub$Diet)
fit1_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub)
summary(fit1_sub)
anova(fit1_sub)
fit2_sub <- geeglm(weight ~ Time * Diet, id = Chick, data = chick_sub, std.err = "fij")
summary(fit2_sub)
anova(fit2_sub)
# Standard errors differ.
cbind(sqrt(diag(vcov(fit1_sub))), sqrt(diag(vcov(fit2_sub))))
# p-values differ.
round(cbind(summary(fit1_sub)$coefficients[, 4],
            summary(fit2_sub)$coefficients[, 4]), 4)
# Significance at 5% across all four fits.
cbind(summary(fit1)$coefficients[, 4],
      summary(fit2)$coefficients[, 4],
      summary(fit1_sub)$coefficients[, 4],
      summary(fit2_sub)$coefficients[, 4]) < 0.05
# install.packages("fracdiff")  # run once, then keep commented out
library(fracdiff)
set.seed(123)

# Simulate a linear regression whose errors are fractionally integrated
# (long memory, ARFIMA(0, d, 0)) and fit OLS, which ignores the dependence.
# Sample size, design matrix, and true coefficients are fixed across runs.
n <- 1000
x <- rnorm(n)
X <- cbind(1, x)
beta <- c(1, 2)

# Run one simulation for a given long-memory parameter d (0 < d < 0.5):
# generate the noise, fit OLS, print the summary, and plot the error ACF.
sim_ols_long_memory <- function(d) {
  u <- fracdiff.sim(n = n, d = d)$series  # fractionally integrated noise
  y <- as.numeric(X %*% beta + u)
  fit_ols <- lm(y ~ x)                    # misspecified: ignores long memory
  print(summary(fit_ols))
  acf(u, lag.max = 100, main = "ACF of Long-Memory Errors")
  invisible(fit_ols)
}

# The slow ACF decay illustrates long memory; it strengthens as d grows.
sim_ols_long_memory(d = 0.3)
sim_ols_long_memory(d = 0.4)
# High-dimensional setting: p > n, with only the first 10 predictors active.
n <- 1000
p <- 2000
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
# True coefficients of the 10 active predictors.
beta <- rep(1, 10)

# Marginal correlation screening under long-memory noise with parameter d:
# generate y from the first 10 columns of X plus fractionally integrated
# noise, compute each predictor's correlation with y, plot the histogram,
# and return the predictor indices ordered by decreasing correlation
# (ideally indices 1..10 rank first).
screen_by_correlation <- function(d) {
  u <- fracdiff.sim(n = n, d = d)$series
  y <- as.numeric(X[, 1:10] %*% beta + u)
  vcor <- vapply(seq_len(p), function(i) cor(X[, i], y), numeric(1))
  hist(vcor)
  order(vcor, decreasing = TRUE)
}

vcor_order <- screen_by_correlation(d = 0.3)
vcor_order[1:10]
vcor_order[1:20]
vcor_order <- screen_by_correlation(d = 0.4)
vcor_order[1:20]
# Lymphoma study: lasso-penalized Cox regression on high-dimensional
# gene-expression predictors, followed by Kaplan-Meier curves stratified
# by the sign of the fitted linear predictor.
library(glmnet)
library(survival)
set.seed(2025)
# NOTE(review): setwd() in scripts is discouraged — prefer running from
# the project directory or using absolute paths. Kept to match the session;
# it must run BEFORE the read.table calls (the original attempt read first
# and failed).
setwd("~/Downloads/linfoma")
# Lymphoma study data: predictors, survival times, censoring status.
data_x <- read.table('./lymphx.txt', header = FALSE)
data_y <- read.table('./lymphtim.txt', header = FALSE)
data_cens <- read.table('./lymphstatus.txt', header = FALSE)
dim(data_x)
dim(data_y)
dim(data_cens)
head(data_y)
head(data_cens)
table(data_cens)
# Predictor matrix and Surv object (time + censoring indicator).
x <- as.matrix(data_x)
y <- survival::Surv(data_y$V1, data_cens$V1)
n <- dim(x)[1]
p <- dim(x)[2]
# Kaplan-Meier estimate for the whole sample.
km_fit <- survival::survfit(y ~ 1)
# png(file="linfoma_kaplan.png",
#     width=600, height=500, res = 100)
plot(km_fit, xlab = 'Tempos', ylab = 'S(t)', conf.int = 0.95)
# Lasso-penalized Cox regression path over the lambda grid.
lasso_fit <- glmnet(x, y, family = "cox", alpha = 1)
# png(file="linfoma_lasso_path.png",
#     width=600, height=500, res = 100)
plot(lasso_fit, xvar = 'lambda', sign.lambda = 1,
     lwd = 2, ylab = 'Coeficientes')
# Cross-validation to choose lambda.
cv_lasso_fit <- glmnet::cv.glmnet(x, y, family = 'cox', alpha = 1,
                                  type.measure = 'deviance', nfolds = 10)
# png(file="linfoma_lasso_deviance_cv.png",
#     width=600, height=500, res = 100)
plot(cv_lasso_fit, sign.lambda = 1)
# Coefficient estimates at the 'optimal' lambda.
cv_lasso_fit$lambda.min
id_lambda_min <- which(lasso_fit$lambda == cv_lasso_fit$lambda.min)
beta_hat_lasso <- coef(lasso_fit, s = lasso_fit$lambda[id_lambda_min])
sum(beta_hat_lasso != 0)  # number of predictors selected by the lasso
# Plot zero (red dots) vs. non-zero (blue stars) estimates by index.
# png(file="linfoma_estimativas_lasso.png",
#     width=600, height=500, res = 100)
par(mar = c(5, 4, 1, 1), mgp = c(2.5, 1, 0))
plot(1:p, rep(0, p), ylim = c(min(beta_hat_lasso), max(beta_hat_lasso)),
     pch = ' ', ylab = expression(beta[i]), xlab = 'i', cex.lab = 1.5)
id <- which(beta_hat_lasso == 0)
points(id, beta_hat_lasso[id], pch = 20, col = 'red', cex = 2)
id <- which(beta_hat_lasso != 0)
points(id, beta_hat_lasso[id], pch = 8, col = 'blue', cex = 2)
legend("topright", c(expression(hat(beta[i]) == 0), expression(hat(beta[i]) != 0)),
       pch = c(20, 8), col = c("red", "blue"), pt.cex = 1.5)
# Fitted linear predictor (the Cox model has no intercept beta_0).
fitted_eta_values <- x %*% beta_hat_lasso
# Kaplan-Meier curves for the positive / non-positive predictor groups.
id_etapos <- which(fitted_eta_values > 0)
km_fit_eta_pos <- survival::survfit(y[id_etapos] ~ 1)
id_etaneg <- which(fitted_eta_values <= 0)
km_fit_eta_neg <- survival::survfit(y[id_etaneg] ~ 1)
# Write the stratified Kaplan-Meier plot to disk.
png(file = "linfoma_kaplan_etavalues.png",
    width = 600, height = 500, res = 100)
plot(km_fit, xlab = 'Tempos', ylab = 'S(t)',
     col = 'Black', lwd = 2, conf.int = FALSE)
lines(km_fit_eta_pos, conf.int = FALSE, col = 'Red', lwd = 2)
lines(km_fit_eta_neg, conf.int = FALSE, col = 'Green', lwd = 2)
legend("topright", c(expression(hat(eta) <= 0), expression(hat(eta) > 0), 'todos'),
       fill = c('Green', 'Red', 'Black'), cex = 1.2)
dev.off()
# Clean rerun of the lymphoma Cox-lasso analysis.
# NOTE(review): rm(list = ls()) in scripts is discouraged — it wipes the
# user's workspace; prefer restarting R for a clean session.
rm(list = ls())
library(glmnet)
library(survival)
set.seed(2025)
# Lymphoma study data (relative paths: assumes the working directory
# contains the lymph*.txt files — it was set earlier in the session).
data_x <- read.table('./lymphx.txt', header = FALSE)
data_y <- read.table('./lymphtim.txt', header = FALSE)
data_cens <- read.table('./lymphstatus.txt', header = FALSE)
dim(data_x)
dim(data_y)
dim(data_cens)
head(data_y)
head(data_cens)
table(data_cens)
# Predictor matrix
x <- as.matrix(data_x)
# Surv object with survival times and censoring indicator
y <- survival::Surv(data_y$V1, data_cens$V1)
n <- dim(x)[1]
p <- dim(x)[2]
# Estimate of the survival function (Kaplan-Meier)
km_fit <- survival::survfit(y ~ 1)
# png(file="linfoma_kaplan.png",
#     width=600, height=500, res = 100)
plot(km_fit, xlab = 'Tempos', ylab = 'S(t)', conf.int = 0.95)
# Lasso-penalized Cox regression path over the lambda grid.
lasso_fit = glmnet(x, y, family = "cox", alpha = 1)
# png(file="linfoma_lasso_path.png",
#     width=600, height=500, res = 100)
plot(lasso_fit, xvar = 'lambda', sign.lambda = 1,
lwd=2, ylab='Coeficientes')
# Cross-validation to choose lambda
cv_lasso_fit <- glmnet::cv.glmnet(x, y, family = 'cox', alpha = 1,
type.measure = 'deviance', nfolds = 10)
# png(file="linfoma_lasso_deviance_cv.png",
#     width=600, height=500, res = 100)
plot(cv_lasso_fit, sign.lambda = 1)
# Coefficient estimates at the 'optimal' lambda
cv_lasso_fit$lambda.min
# (variable name carries a typo, 'labda'; kept as-is to preserve the code)
id_labda_min <- which(lasso_fit$lambda == cv_lasso_fit$lambda.min)
beta_hat_lasso <- coef(lasso_fit, s=lasso_fit$lambda[id_labda_min])
sum(beta_hat_lasso != 0)  # number of predictors selected by the lasso
# Plot zero (red dots) vs. non-zero (blue stars) estimates by index.
# png(file="linfoma_estimativas_lasso.png",
#     width=600, height=500, res = 100)
par(mar = c(5,4,1,1), mgp = c(2.5, 1, 0))
plot(1:p, rep(0, p), ylim = c(min(beta_hat_lasso), max(beta_hat_lasso)),
pch=' ', ylab = expression(beta[i]), xlab = 'i', cex.lab=1.5)
id <- which(beta_hat_lasso == 0)
points(id, beta_hat_lasso[id], pch=20, col='red', cex=2)
id <- which(beta_hat_lasso != 0)
points(id, beta_hat_lasso[id], pch=8, col='blue', cex=2)
legend("topright", c(expression(hat(beta[i])==0), expression(hat(beta[i])!=0)),
pch = c(20, 8), col = c("red", "blue"), pt.cex=1.5)
# Fitted values of the linear predictor
fitted_eta_values <- x%*%beta_hat_lasso # the Cox model has no beta_0
# Kaplan-Meier for cases with positive fitted linear predictor
id_etapos <- which(fitted_eta_values > 0)
km_fit_eta_pos <- survival::survfit(y[id_etapos] ~ 1)
# Kaplan-Meier for cases with non-positive fitted linear predictor
id_etaneg <- which(fitted_eta_values <= 0)
km_fit_eta_neg <- survival::survfit(y[id_etaneg] ~ 1)
# png(file="linfoma_kaplan_etavalues.png",
#    width=600, height=500, res = 100)
plot(km_fit, xlab = 'Tempos', ylab = 'S(t)',
col = 'Black', lwd=2, conf.int = FALSE)
lines(km_fit_eta_pos, conf.int = FALSE, col = 'Red', lwd=2)
lines(km_fit_eta_neg, conf.int = FALSE, col = 'Green', lwd=2)
legend("topright", c(expression(hat(eta)<=0), expression(hat(eta)>0), 'todos'),
fill = c('Green', 'Red', 'Black'), cex=1.2)
