## R version information # R.Version() # $platform # [1] "x86_64-w64-mingw32" # $version.string # [1] "R version 3.2.1 (2015-06-18)" # $nickname # [1] "World-Famous Astronaut" ## NOTE: Split_df_v4_2/ filelist[[2]] is the main experiment reported in the manuscript. ## Split_df_v4_1 and _3 / filelist[[1]] & [[3]] are the experiments reported in the appendix (experiment 2 and 3, respectively) ## THESE NAMES ARE NOW CHANGED to split1.txt ... ## load packages require(plyr) require(ggplot2) require(grid) require(lme4) require(multcomp) require(reshape2) ## workspace (after running the following commented code) load("/DDM_JDM_v2.RData") # # ## read in raw data # setwd("/dataframes_v4/") # filenames <- list.files(pattern="split_df_v4*.*txt") # filelist <- as.list(lapply(filenames, read.table, header=T)) # names(filelist) <- c("S1","S2","S3") ## note: S1 = reduced nutrition, S2=full info (main paper), S3= reduced, no percentages # # ## data type conversion # for (x in seq_along(filelist)) { # filelist[[x]]$GDA_indi = as.factor(filelist[[x]]$GDA_indi) ## 1=GDA, 2=TL # filelist[[x]]$RT_choice = as.numeric(as.character(filelist[[x]]$RT_choice)) # filelist[[x]]$rating_bins = as.factor(filelist[[x]]$rating_bins) # filelist[[x]]$rating_diff = as.numeric(as.character(filelist[[x]]$rating_diff)) # } # # ## join dataframe for data cleaning # joined_df <- do.call("rbind", filelist) # ###################################### # ######## data cleaning ############### # ###################################### # # ## first remove all RTs > 30 -- ztree errors, mean and SD not robust enough for really long outliers # joined_df<-joined_df[joined_df$RT_choice<=30,] # # ## subject - specific RT, remove 2 SD above subject-specific mean RT # temp<-split(joined_df, f=joined_df$ID) # ## create empty list of dataframes # mittel<-lapply(1:130,function(i) data.frame()) # stabw<-lapply(1:130,function(i) data.frame()) # # for (x in seq_along(temp)){ # mittel[[x]]<-mean(temp[[x]]$RT_choice) # stabw[[x]]<-sd(temp[[x]]$RT_choice) # ## then remove RT more than 2 SD above individual mean # temp[[x]]<-temp[[x]][temp[[x]]$RT_choice<= mittel[[x]]+2*stabw[[x]],] # } # # ## how many participants excluded in main experiment (sbj- IDs 44-87 are from main experiment) # excl <- sapply(temp, nrow) # 350-mean(excl[44:87]) # sd(excl[44:87]) # range(excl[44:87]) # # ## bind again # joined_df <- do.call("rbind", temp) # filelist <-split(joined_df, f=joined_df$session) # # ## save (for diffusion modeling/ drift as a function (here session 2 only)) # write.table(filelist[[2]], "/one_drift_theta/split_df_2_v4.csv", sep=";", quote=FALSE, row.names=FALSE) # write.table(filelist[[2]][,c(8,10,11)], "/one_drift_theta/split_df_2_v4_fastdm.txt", sep="\t", quote=FALSE, row.names=FALSE) # write.table(filelist[[2]][,c(8,10,11,12)], "/one_drift_theta/split_df_2_v4_fastdm_ratingbins.txt", sep="\t", quote=FALSE, row.names=FALSE) # # ########################################################################################### # ###############################log-mixed model ########################################### # ########################################################################################### ## model "Label" logmixed1<-lapply(filelist, function(z) glmer(h_choice ~ GDA_indi + (1 + GDA_indi|ID), data=z, family="binomial")) ## stats stats1<-lapply(logmixed1, function(z) summary(z)) ## model Liking + Label ## interaction with ratings logmixed2<-lapply(filelist, function(z) glmer(h_choice ~ GDA_indi + rating_diff + (1 + GDA_indi+rating_diff |ID), data=z, family="binomial")) ## stats stats2<-lapply(logmixed2, function(z) summary(z)) ## model Liking * Label logmixed3<-lapply(filelist, function(z) glmer(h_choice ~ rating_diff*GDA_indi + (1 + GDA_indi*rating_diff | ID),data=z ,family="binomial")) ## stats stats3<-lapply(logmixed3, function(z) summary(z)) ############################## ### Reaction time ############ ############################## ## log transform RT due to skewness in binary choice paradigms for (b in seq_along(filelist)) { filelist[[b]]$RT_choice_log = log(filelist[[b]]$RT_choice) } RT.model <- lapply(filelist, function (z) lmer(RT_choice_log ~ GDA_indi + (1 + GDA_indi|ID), data =z, REML=FALSE)) ## stats ### summary RT RT_sum <-ddply(filelist[[2]], "GDA_indi", summarize, RT.mean=mean(RT_choice_log), RT.sd=sd(RT_choice_log)) ############################################ ######## plots ############################# ############################################ ## percentage of healthy choices session 2 (main experiment) ### FIGURE 3 ## split into individual dataframes temp<-split(filelist[[2]], paste(filelist[[2]]$ID, filelist[[2]]$GDA_indi)) ## empty vector for looping vec<-vector("list", length(temp)) ## sum healthy choices for (x in seq_along(temp)){ vec[[x]] <- sum(temp[[x]]$h_choice)/ nrow(temp[[x]]) } ## bind into dataframe temp_binded<-as.data.frame(do.call("rbind", vec)) ## GDA_indi temp_binded$GDA_indi<-c(1,2) ## add column names colnames(temp_binded)<-c("Percent_healthy_choice", "GDA_indi") temp_binded$GDA_indi<-as.factor(temp_binded$GDA_indi) ### summary stats fig3_plo<-ddply(temp_binded, "GDA_indi", summarize, Percent.mean=mean(Percent_healthy_choice), Percent.sd=sd(Percent_healthy_choice), Lower=Percent.mean - Percent.sd/sqrt(length(unique(filelist[[2]]$ID))), Upper=Percent.mean + Percent.sd/sqrt(length(unique(filelist[[2]]$ID)))) ## plot Figure 3 fig3 <- ggplot(fig3_plo, aes(x=GDA_indi, y=Percent.mean, group=factor(GDA_indi),shape=factor(GDA_indi), colour=factor(GDA_indi)))+ geom_bar(fill=c("gray80", "gray20"),stat="identity", colour="black", size=.6, width=0.5) + geom_errorbar(aes(ymin=Lower, ymax=Upper), width=0.1, colour="black")+ theme_classic(base_size = 14, base_family ="serif")+theme(panel.grid = element_blank()) + scale_colour_manual(values=c("gray80", "gray20"), guide=FALSE)+ coord_cartesian(ylim=c(0.0, 1))+ scale_x_discrete(name="Label", labels=c("GDA", "TL"))+ scale_y_continuous(name="Probability of healthy choices", expand=c(0,0))+ theme(legend.key.width =unit(1.5, "cm"), axis.ticks.x = element_blank()) + geom_text(x = 1.5, y = 0.63, label = "**", size=10) ## add aterisks fig3 ggsave("E:/Figure3.tiff", width=6, height=8, dpi=300) ggsave("E:/Figure3.eps", family="serif", width=6, height=8, dpi=300) ### Figure 4 ### Interaction with liking of taste, using empirical vs. predicted probabilities ## predict with random effect (ID) # IDs: 44-87 ## note that liking ratings were binned for illustration purposes only logmixed_ranks_fig4<-glmer(h_choice ~ rating_bins + GDA_indi + rating_bins*GDA_indi + (1|ID),data=filelist[[2]],family="binomial") new1<-data.frame(GDA_indi=factor(c(1,1,1,1,1,1,1,2,2,2,2,2,2,2)), rating_bins=factor(1:7), ID=factor(rep(c(44:87), each=14)), h_choice=0) new1$prob<-predict(logmixed_ranks_fig4,newdata=new1, re.form=NULL, type="response") ## predicted data new1<-ddply(new1, .(GDA_indi, rating_bins), summarize, prob.mean=mean(prob)) ## empirical data h_choice<-ddply(filelist[[2]], .(rating_bins,GDA_indi), summarise, h_choice_sum = sum(h_choice)) occurences<-data.frame(table(filelist[[2]]$GDA_indi, filelist[[2]]$rating_bins)) colnames(occurences)<-(c("GDA_indi", "rating_bins", "Number_of_choices")) emp_h_choice <- join(h_choice, occurences, by=c("GDA_indi", "rating_bins"),type="left") ## percent healthy choices emp_h_choice$prob.mean<- emp_h_choice$h_choice_sum/emp_h_choice$Number_of_choices ## GDA healthy ## empirical == 1, predicted ==2 healthy_choice_GDA_emppred<-rbind.fill(emp_h_choice,new1) ## rbind fill from plyr package when uneven columns/differnct columns healthy_choice_GDA_emppred$emppred<-as.factor(rep(c(1:2), each = 14 )) ## 7 rating bins * 2 label types ## PLOT Figure 4 ### to connect lines of only predicted, but not empirical: subset (see below) fig4 <- ggplot(healthy_choice_GDA_emppred, aes(x=rating_bins, y=prob.mean, group=emppred, color=GDA_indi)) + geom_point(aes(shape=emppred),size=4) + scale_shape_manual(values=c(4,1), labels = c("Empirical", "predicted"), name="") + scale_x_discrete(breaks = c(1:7)) + theme_bw(base_size = 14) + xlab("Taste unhealthy > healthy Taste healthy > unhealthy") + ylab("Probability of healthy choice")+ scale_colour_manual(values=c("gray60", "gray20"),name="Label",labels=c("GDA", "TL")) + geom_line(data=subset(healthy_choice_GDA_emppred,emppred==2), aes(factor(rating_bins), prob.mean, group=GDA_indi)) + ggtitle("Empirical vs. predicted probability of healthy choices") + theme_classic(base_size = 14, base_family ="serif")+theme(panel.grid = element_blank()) fig4 ggsave("E:/Figure4.tiff",width=8, height=6, dpi=300) ggsave("E:/Figure4.eps", family ="serif", width=8, height=6, dpi=300) ####################################################### ############# test for weights effect (APPENDIX) ###### ####################################################### ## other factors might contribute to the choice and so in theory the labels could make the subjects ## put more weight on taste and health, while lowering the weight on other unnamed factors. ## One simple way to address this is to just run a logit with only taste predicting choice, once for each label type. ## Then comparing the coefficients -- If they go down in the TL condition, we can say that the weight on taste ## has indeed gone down in the TL condition. ## Then the same thing with only health and see if indeed that coefficient goes up in TL. ## Recode so that we can use choice_left as DV filelist[[2]]$choice_left[filelist[[2]]$h_left == 1 & filelist[[2]]$h_choice == 1]<-1 filelist[[2]]$choice_left[filelist[[2]]$h_left == 1 & filelist[[2]]$h_choice == 0]<-0 filelist[[2]]$choice_left[filelist[[2]]$h_left == 0 & filelist[[2]]$h_choice == 1]<-0 filelist[[2]]$choice_left[filelist[[2]]$h_left == 0 & filelist[[2]]$h_choice == 0]<-1 filelist[[2]]$choice_left<-as.factor(filelist[[2]]$choice_left) filelist[[2]]$h_left<-as.factor(filelist[[2]]$h_left) ## now adjust rating differences to left right vs. healthy unhealthy filelist[[2]]$taste_left <- ifelse(filelist[[2]]$h_left == 1, filelist[[2]]$rating_h , filelist[[2]]$rating_u) filelist[[2]]$taste_right <- ifelse(filelist[[2]]$h_left == 0, filelist[[2]]$rating_h , filelist[[2]]$rating_u) filelist[[2]]$taste_left_right <- filelist[[2]]$taste_left - filelist[[2]]$taste_right ## split into TL and GDA, first file is GDA, second is TL onlyTLGDA <- (split(filelist[[2]], filelist[[2]]$GDA_indi)) summary(rev1<- glmer(choice_left ~ taste_left_right + (1 | ID),data=onlyTLGDA[[1]],family="binomial")) summary(rev2<- glmer(choice_left ~ taste_left_right + (1 | ID),data=onlyTLGDA[[2]],family="binomial")) summary(rev3<- glmer(choice_left ~ h_left + (1 | ID),data=onlyTLGDA[[1]],family="binomial")) summary(rev4<- glmer(choice_left ~ h_left + (1 | ID),data=onlyTLGDA[[2]],family="binomial")) ############################################################### ################# fast dm ##################################### ############################################################### ## you need individual dataframes per person for fast-dm # # overall<-filelist[[2]][,c("ID", "GDA_indi", "h_choice", "RT_choice" )] # ID_overall<-split(overall, overall$ID) # ID_overall<-lapply(ID_overall, function(x) x[c(2,3,4)]) # ## save as dat file # sapply(names(ID_overall), function (x) write.table(ID_overall[[x]], file=paste("E:/ID_overall_", x, ".dat", sep=""), # sep="\t", col.names = F, row.names = F, quote=F)) # # ## now you can run the fast-dm code (separate code file used in fast dm, code is provided) ## then read in fast-dm output setwd("/fast_dm/DDM_v4_overall_drift_and_startingpoint_2_singlesubject/") results_drift <-read.table("overall_cs_drift.log", header=T) results_drift_z <- read.table("overall_cs_drift_and_startingpoint.log", header=T) results_drift_t0 <-read.table("overall_cs_drift_and_t0.log", header=T) results_drift_z_t0 <-read.table("overall_cs_drift_and_startingpoint_and_t0.log", header=T) ## convert columns to numeric results_drift<-as.data.frame(apply(results_drift, 2, function(x) as.numeric(as.character(x)))) results_drift_z<-as.data.frame(apply(results_drift_z, 2, function(x) as.numeric(as.character(x)))) results_drift_t0<-as.data.frame(apply(results_drift_t0, 2, function(x) as.numeric(as.character(x)))) results_drift_z_t0<-as.data.frame(apply(results_drift_z_t0, 2, function(x) as.numeric(as.character(x)))) ## model drift t test t.test(results_drift$v_1,results_drift$v_2, paired=TRUE) ## means and sd print(mean<-apply(results_drift, 2, function(x) mean(x)), na.rm=TRUE) sd<- as.data.frame(apply(results_drift, 2, function(x) sd(x))) print(sem<- apply(sd, 2, function(x) sqrt(x)/43)) ## model drift and starting point t test t.test(results_drift_z$v_1,results_drift_z$v_2, paired=TRUE) t.test(results_drift_z$zr_1,results_drift_z$zr_2, paired=TRUE) ## means and sd print(mean<-apply(results_drift_z, 2, function(x) mean(x))) sd<- as.data.frame(apply(results_drift_z, 2, function(x) sd(x))) print(sem<- apply(sd, 2, function(x) sqrt(x)/43)) ## model drift and nondecision time t test t.test(results_drift_t0$v_1,results_drift_t0$v_2, paired=TRUE) t.test(results_drift_t0$t0_1,results_drift_t0$t0_2, paired=TRUE) ## means and sd print(mean<-apply(results_drift_t0, 2, function(x) mean(x))) sd<- as.data.frame(apply(results_drift_t0, 2, function(x) sd(x))) print(sem<- apply(sd, 2, function(x) sqrt(x)/43)) ## model drift and nondecision and startingpoint t test t.test(results_drift_z_t0$v_1,results_drift_z_t0$v_2, paired=TRUE) t.test(results_drift_z_t0$zr_1,results_drift_z_t0$zr_2, paired=TRUE) t.test(results_drift_z_t0$t0_1,results_drift_z_t0$t0_2, paired=TRUE) ## means and sd print(mean<-apply(results_drift_z_t0, 2, function(x) mean(x))) sd<- as.data.frame(apply(results_drift_z_t0, 2, function(x) sd(x))) print(sem<- apply(sd, 2, function(x) sqrt(x)/43)) ## melt for plotting results_drift_z_t0 <- results_drift_z_t0[, c(8, 5, 3, 2, 9, 6)] ## only columns of interest results_drift_z_t0_melt<-melt(results_drift_z_t0) colnames(results_drift_z_t0_melt)<-c("labeling", "Estimate") ## for drift and starting point and t0 results_drift_z_t0_melt$Label[results_drift_z_t0_melt$labeling=="v_1"]<-"GDA" results_drift_z_t0_melt$Label[results_drift_z_t0_melt$labeling=="zr_1"]<-"GDA" results_drift_z_t0_melt$Label[results_drift_z_t0_melt$labeling=="t0_1"]<-"GDA" results_drift_z_t0_melt$Label[results_drift_z_t0_melt$labeling=="v_2"]<-"TL" results_drift_z_t0_melt$Label[results_drift_z_t0_melt$labeling=="zr_2"]<-"TL" results_drift_z_t0_melt$Label[results_drift_z_t0_melt$labeling=="t0_2"]<-"TL" results_drift_z_t0_melt$variable<-rep(c("Drift rate", "Starting point", "Non-decision time"), each=nrow(results_drift_z_t0_melt)/3) results_drift_z_t0_melt$Label <- as.factor(results_drift_z_t0_melt$Label) ## zr vs v vs. t0 plot summary ## FIGURE 5 results_drift_z_t0_melt$Label<-as.factor(results_drift_z_t0_melt$Label) overall_plot_Fastdm<-ddply(results_drift_z_t0_melt, .(Label, variable), summarize, Estimate.mean=mean(Estimate), Estimate.sd=sd(Estimate), Lower=Estimate.mean - Estimate.sd/sqrt(nrow(results_drift_z_t0_melt)/2), Upper=Estimate.mean + Estimate.sd/sqrt(nrow(results_drift_z_t0_melt)/2)) fig5 <- ggplot(overall_plot_Fastdm, aes(y=Estimate.mean, x=Label, fill=Label)) + facet_grid(.~variable)+ geom_bar(stat="identity", colour="black", size=.6, width=0.5) + scale_fill_manual(values=rep(c("gray80", "gray20"), times = 3)) + labs(fill="")+ geom_errorbar(aes(ymin=Lower, ymax=Upper), width=0.1)+ ylab("Mean Parameter Estimates")+ facet_grid(.~variable)+ scale_x_discrete(labels=c("GDA", "TL")) + geom_hline(yintercept=0, colour="gray50") + ## adds horizontal line at zero theme_classic(base_size = 20, base_family ="serif")+theme(panel.grid = element_blank())+ theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) ## add aterisk only to first facet of plot fig5 + geom_text(data=data.frame(x=1.5, y=0.15, label=c("*", "",""), variable=c("Drift rate", "Non-decision time", "Starting point")), aes(x,y,label=label), inherit.aes=FALSE, size=10) ggsave("E:/Figure5.tiff",width=8, height=6, dpi=300) ggsave("E:/Figure5.eps", family ="serif", width=8, height=6, dpi=300) ################################################################## ################# Drift as function ############################## ################################################################## # # ### function # rm(list=ls()) # require(RWiener) # label <- c(1,2) ## 1= GDA; 2= TL # # pull in the data files # # for (y in 1:length(label)){ # labels = label[y] # data <- read.table('/one_drift_theta/split_df_2_v4.csv', sep=";", header = TRUE) # # ## find min RT (for modeling later) # minRT <- min(data$RT_choice) # # # Personal deviance function # theta_dev <- function(startPar) # { # alpha <- startPar[1] # tau <- startPar[2] # beta <- startPar[3] # thetaA <- startPar[4] # thetaB <- startPar[5] # # delta <- thetaA+(thetaB*rateDiff) # # p1 = rep(0, length(dat$RT)) # p0 = rep(0, length(dat$RT)) # # for (t in 1:length(dat$RT)) # { # RTt <- dat$RT[t] # deltat <- delta[t] # # p1[t] <- dwiener(RTt, alpha, tau, beta, deltat, resp = "upper", give_log = FALSE) # p0[t] <- dwiener(RTt, alpha, tau, beta, deltat, resp = "lower", give_log = FALSE) # } # deviance <- -2*sum((log(p1)*choice)+(log(p0)*(1-choice))) # if (is.finite(deviance) == FALSE) # { # deviance <- 100000 # } # return(deviance) # } # # # ## trinary rating difference (coarse) # data$rating_3<-NA # data$rating_3[data$rating_diff>=-10]<- -1 # data$rating_3[data$rating_diff>=-3]<-0 # data$rating_3[data$rating_diff> 3]<- 1 # # # Only take the variables we need right now # data <- data.frame(subjectNum = data$ID, RT = data$RT, Acc = data$h_choice, rateDiff = data$rating_3, GDA_indi=data$GDA_indi) # # # create data list to put all the info into # allProbPar <- data.frame(alpha = numeric(), tau = numeric(), beta = numeric(), a = numeric(), b = numeric(), deviance = numeric(), convergence = numeric()) # # # First, rename the subject numbers # currentSub = 1 # for (i in 1:max(data$subjectNum)) # { # if (length(data[data$subjectNum == i, ]$subjectNum > 0)) # { # data[data$subjectNum == i, ]$subjectNum <- rep(currentSub, length(data[data$subjectNum == i, "subjectNum"])) # currentSub = currentSub+1 # } # } # # ## split data according to TL and GDA # data_part<-split(data, data$GDA_indi) # # if(labels == 1) # {data_optim<-data_part[[1]] } # else if(labels ==2) # {data_optim<-data_part[[2]]} # # # # Now do all calculations on the individual subject level # # for (i in 1:max(data_optim$subjectNum)) # { # # First pull only the subject you're working on # dat <- data_optim[data_optim$subjectNum == i, ] # # # Now RT cut the data (necessary for estimation purposes, dep. on empirical data) # # dat <- dat[dat$RT > minRT, ] # # rateDiff <- dat$rateDiff # choice <- dat$Acc # # dat$rateDiff <- NULL # dat$subjectNum <- NULL # dat$GDA_indi <- NULL # dat[dat$Acc == 1, 'Acc'] <- 'upper' # dat[dat$Acc ==0, 'Acc'] <- 'lower' # # startPar <- c(sample(seq(.5,4.5,.1), 1), sample(seq(.1, .4, .01), 1), sample(seq(.2, .8, .01), 1), sample(seq(0, 1, .01), 1), sample(seq(0, 1, .01), 1)) # # # Get the paramaters! # probPar <- optim(startPar, theta_dev, method = "L-BFGS-B", lower = c(.5, .2, .4, -1, -1), upper = c(4.5, 1.5, 1, 1, 1), control = list(maxit = 5000)) # # allProbPar[nrow(allProbPar)+1, ] <- c(probPar$par, probPar$value, probPar$convergence) # } # if(labels == 1) # {write.table(allProbPar, '/one_drift_theta/individualTheta_choiceProb_GDA_L-BFGS.csv', sep = ",")} # else if(labels ==2) # {write.table(allProbPar, '/one_drift_theta/individualTheta_choiceProb_TL_L-BFGS.csv', sep = ",")} # } ## Read OUTPUt form drift-as-function (DAS) after estimation loop GDA_das_fun <- read.table('/one_drift_theta/individualTheta_choiceProb_GDA_L-BFGS.csv', sep=",", header=TRUE) GDA_das_fun <- GDA_das_fun[, c("a", "b")] TL_das_fun <- read.table('/one_drift_theta/individualTheta_choiceProb_TL_L-BFGS.csv', sep=",", header=TRUE) TL_das_fun <- TL_das_fun[, c("a", "b")] GDA_TL_das_fun <- cbind(GDA_das_fun, TL_das_fun) colnames(GDA_TL_das_fun) <- c("GDA_a", "GDA_b", "TL_a", "TL_b") ## test t.test(GDA_TL_das_fun$GDA_a, GDA_TL_das_fun$TL_a, paired=TRUE) t.test(GDA_TL_das_fun$GDA_b, GDA_TL_das_fun$TL_b, paired=TRUE) ## means and sd print(mean<-apply(GDA_TL_das_fun, 2, function(x) mean(x))) sd<- as.data.frame(apply(GDA_TL_das_fun, 2, function(x) sd(x))) print(sem<- apply(sd, 2, function(x) sqrt(x)/43)) ## Plotting FIGURE 6 data_part1 <- melt(GDA_TL_das_fun) ## add labels and which coefficient data_part1$GDA_indi<-rep(c("GDA", "TL"), each=88) data_part1$coeff<-rep(c("a", "b"), each=44) fig6_plot<-ddply(data_part1, c("GDA_indi", "coeff"), summarize, coeff.mean=mean(value, na.rm=T),coeff.sd=sd(value, na.rm=T), Lower=coeff.mean - coeff.sd/sqrt(44), Upper=coeff.mean + coeff.sd/sqrt(44)) fig6 <- ggplot(data=fig6_plot, aes(x=coeff, y=coeff.mean, fill = GDA_indi, group=GDA_indi)) + geom_bar(stat="identity", position=position_dodge(), colour="black", size=.6, width=0.5) + geom_errorbar(aes(ymin=Lower, ymax=Upper), width=0.1, colour="black", position=position_dodge(width=0.5))+ scale_fill_manual(values=c("gray80", "gray20", "gray80", "gray20")) + labs(fill="")+ theme_classic(base_size = 14, base_family ="serif")+theme(panel.grid = element_blank()) + coord_cartesian(ylim=c(-0.1, 1.1))+ scale_x_discrete(name="Coefficient", labels=c("health sensitivity", "taste weight"))+ scale_y_continuous(name="Coefficient mean", expand=c(0,0)) + geom_hline(yintercept=0 )+ geom_vline(xintercept=0.4) + theme(axis.line=element_blank()) + theme(legend.key.width =unit(1.5, "cm"), axis.ticks.x = element_blank()) ## add aterisks fig6 + annotate("text", x = c(1,2), y = c(0.2, 0.95), label = c("*", "*"), size=10) ggsave("E:/Figure6.tiff",width=8, height=6, dpi=300) ggsave("E:/Figure6.eps", family ="serif", width=8, height=6, dpi=300)