**************************************************
* Generates all results that are used in         *
* Klos, A. (2013): Conditions under which the    *
* influence of myopic loss aversion is mitigated *
*                                                *
* The do-File contains all analyses in the paper *
* plus some additional analyses.                 *
*                                                *
* Last modified: June 3, 2013                    *
**************************************************

capture log close
log using logAllResults.txt, replace text
set scheme s1manual

***********
* Study I *
***********

// Choice experiment: binary accept/reject decision (yesno) by framing
// (framing == 0: segregated, framing == 1: aggregated -- see CDF legends below)
insheet using StudyIChoice.csv, clear
sort framing
by framing: sum yesno, detail
ranksum yesno, by(framing)    // Wilcoxon rank-sum: acceptance across framings
save choicedata, replace

// Certainty equivalent experiment
insheet using StudyICertaintyEquivalent.csv, clear
sort framing
by framing: sum ce, detail
ranksum ce, by(framing)
// Empirical CDF of the certainty equivalents, one per framing condition;
// group sizes are stored in locals for the graph legend
cumul ce if framing == 0, gen(ceCDF_0)
qui sum ce if framing == 0
local noObsSeg = r(N)
cumul ce if framing == 1, gen(ceCDF_1)
qui sum ce if framing == 1
local noObsAgg = r(N)
twoway (scatter ceCDF_0 ce if framing == 0, msymbol(smcircle)) ///
	(scatter ceCDF_1 ce if framing == 1, msymbol(triangle_hollow)), ///
	legend(order(2 "Aggregated (N=`noObsAgg')" 1 "Segregated (N=`noObsSeg')" ) col(1)) ///
	xtitle("Certainty Equivalent") ytitle("Percentile") xlabel(-2500(2500)10000)
graph save CDFdistributionCE.gph, replace
graph export CDFdistributionsStudyIce.emf, replace
graph export CDFdistributionsStudyIce.eps, replace

// Implicit acceptance rates: a positive certainty equivalent implies the
// gamble would be accepted
generate yesno = 0
// FIX: guard against missing ce -- Stata evaluates missing as larger than any
// number, so "ce > 0" alone would silently count a missing CE as acceptance
replace yesno = 1 if ce > 0 & ce < .
by framing: sum yesno

// Merge with choice data
append using choicedata
generate choiceDataDummy = 0
// presumably ids below 999 identify choice-experiment subjects -- TODO confirm against data
replace choiceDataDummy = 1 if id < 999

// Logit model with interactions
// use a 0/1-dummy for the segregated condition and not the aggregated condition
// because I have it the same way in study II
generate framingSeg = 1 if framing == 0
replace framingSeg = 0 if framing == 1
generate interactionChoiceTreatment = choiceDataDummy * framingSeg
regress yesno framingSeg choiceDataDummy interactionChoiceTreatment
logit yesno framingSeg choiceDataDummy interactionChoiceTreatment
margins, dydx(*)    // average marginal effects after the logit
save choicedata, replace

// probability matching data
insheet using StudyIProbEquivalent.csv, clear
sort framing
by framing: sum prob
ranksum prob, by(framing)
ttest prob, by(framing)
// split by course language (englishcourse dummy)
by framing: sum prob if englishcourse == 1
by framing: sum prob if englishcourse == 0
ranksum prob, by(englishcourse)
// Empirical CDFs of the probability equivalents, by framing
cumul prob if framing == 0, gen(probCDF_0)
qui sum prob if framing == 0
local noObsSeg = r(N)
cumul prob if framing == 1, gen(probCDF_1)
qui sum prob if framing == 1
local noObsAgg = r(N)
twoway (scatter probCDF_0 prob if framing == 0, msymbol(smcircle)) ///
	(scatter probCDF_1 prob if framing == 1, msymbol(triangle_hollow)), ///
	legend(order(2 "Aggregated (N=`noObsAgg')" 1 "Segregated (N=`noObsSeg')" ) col(1)) ///
	xtitle("Probability Equivalent") ytitle("Percentile")
graph save CDFdistributionProb.gph, replace
graph export CDFdistributionsStudyIprob.emf, replace
graph export CDFdistributionsStudyIprob.eps, replace
graph combine CDFdistributionCE.gph CDFdistributionProb.gph, altshrink
graph export CDFdistributionsStudyI.emf, replace
graph export CDFdistributionsStudyI.eps, replace

************
* Study II *
************

insheet using StudyII.csv, clear
desc
// Consider share in risky assets as dependent variable
replace allocation = 100 - allocation
// any missing values?
drop if allocation == . & estimationbonds == . & estimationstocks == .
drop if allocation == .
// drop one subject
drop if id == 169 // provided negative value for the future value of 1 euro invested in bonds
// flag respondents who skipped at least one of the two estimation questions
generate missingEstimations = 0
replace missingEstimations = 1 if estimationbonds == . | estimationstocks == .
// Attach readable labels to the Study II treatment indicator
// (treatment == 1: segregated, treatment == 0: aggregated)
capture label drop treatmentValueLabels
label define treatmentValueLabels 1 "Segregated" 0 "Aggregated"
label value treatment treatmentValueLabels
// Relative estimate of stocks vs. bonds on the log scale
generate relativeEstimation = ln(estimationstocks) - ln(estimationbonds)
cumul relativeEstimation, gen(relativeEstimationCDF)
// Estimates are future values of 1 euro over 30 years; convert to the
// implied annualized return: FV^(1/30) - 1
generate implicitBondReturn = estimationbonds^(1/30) - 1
generate implicitStockReturn = estimationstocks^(1/30) - 1
cumul implicitBondReturn, gen(implicitBondReturnCDF)
cumul implicitStockReturn, gen(implicitStockReturnCDF)

// Replication of Benartzi and Thaler
// mean allocations by treatment and by whether the three estimation
// questions were asked (threequestions dummy)
sum allocation if treatment == 1 & threequestions == 0
sum allocation if treatment == 1 & threequestions == 1
sum allocation if treatment == 0 & threequestions == 0
sum allocation if treatment == 0 & threequestions == 1
// ... additionally split by whether estimates are missing
sum allocation if treatment == 1 & threequestions == 1 & missingEstimations == 0
sum allocation if treatment == 1 & threequestions == 1 & missingEstimations == 1
sum allocation if treatment == 0 & threequestions == 1 & missingEstimations == 0
sum allocation if treatment == 0 & threequestions == 1 & missingEstimations == 1
ranksum missingEstimations if threequestions == 1, by(treatment)
// Two-limit tobit (allocation censored at 0 and 100) with and without the
// treatment x threequestions interaction
generate interaction = treatment * threequestions
tobit allocation treatment threequestions, ul(100) ll(0)
tobit allocation treatment threequestions interaction, ul(100) ll(0)
test threequestions interaction
drop interaction
// Treatment effect within each threequestions subsample and overall
ranksum allocation if threequestions == 0, by(treatment)
ttest allocation if threequestions == 0, by(treatment)
ranksum allocation if threequestions == 1, by(treatment)
ttest allocation if threequestions == 1, by(treatment)
sum allocation if treatment == 1
sum allocation if treatment == 0
ranksum allocation, by(treatment)
ttest allocation, by(treatment)
// CDFs of allocations with vs. without estimation questions, segregated condition
cumul allocation if threequestions == 0 & treatment == 1, gen(allocationWithout3QuestionsCDF_1)
cumul allocation if threequestions == 1 & treatment == 1, gen(allocationWith3QuestionsCDF_1)
twoway (scatter allocation allocationWith3QuestionsCDF_1 if threequestions == 1, ///
	msymbol(circle_hollow) msize(large)) ///
	(scatter allocation allocationWithout3QuestionsCDF_1 if threequestions == 0, ///
	msymbol(triangle)), legend(order(1 "With Est." 2 "Without Est.")) ///
	ytitle("Allocation To Risky Assets") title("CDF -- Segregated") xtitle("Percentile")
graph save allocationDifferencesSeg.gph, replace
// ... and the same for the aggregated condition
cumul allocation if threequestions == 0 & treatment == 0, gen(allocationWithout3QuestionsCDF_0)
cumul allocation if threequestions == 1 & treatment == 0, gen(allocationWith3QuestionsCDF_0)
twoway (scatter allocation allocationWith3QuestionsCDF_0 if threequestions == 1, ///
	msymbol(circle_hollow) msize(large)) ///
	(scatter allocation allocationWithout3QuestionsCDF_0 if threequestions == 0, ///
	msymbol(triangle)), legend(order(1 "With Est." 2 "Without Est.")) ///
	ytitle("Allocation To Risky Assets") title("CDF -- Aggregated") xtitle("Percentile")
graph save allocationDifferencesAgg.gph, replace
graph combine allocationDifferencesSeg.gph allocationDifferencesAgg.gph, ycommon
graph export allocationDifferences.emf, replace

// Continue with the respondents who answered all questions
drop if estimationbonds == . & threequestions == 1
drop if estimationstocks == . & threequestions == 1

// Descriptive evidence on the estimation questions
sort treatment
by treatment: sum implicitBondReturn
by treatment: sum implicitStockReturn
// Implied annual equity premium per respondent
generate differenceInReturns = implicitStockReturn - implicitBondReturn
sort treatment
by treatment: sum differenceInReturns, detail
ranksum implicitBondReturn, by (treatment)
ranksum implicitStockReturn, by (treatment)
ranksum differenceInReturns, by (treatment)
// CDF of implied bond returns by treatment; xline(0.1) marks the 10%
// cutoff used below for "absurdly high" bond returns
cumul implicitBondReturn if treatment == 0, gen(implicitBondReturnCDF_0)
cumul implicitBondReturn if treatment == 1, gen(implicitBondReturnCDF_1)
qui sum implicitBondReturnCDF_1 if treatment == 1
local segN = r(N)
qui sum implicitBondReturnCDF_0 if treatment == 0
local aggN = r(N)
twoway (scatter implicitBondReturnCDF_1 implicitBondReturn if treatment == 1, ///
	msymbol(smcircle)) ///
	(scatter implicitBondReturnCDF_0 implicitBondReturn if treatment == 0, ///
	msymbol(triangle_hollow)), ///
	legend(order(1 "Segregated (N=`segN')" 2 "Aggregated (N=`aggN')" )) ///
	xtitle("Implicit Annualized Bond Return") ytitle("Percentile") xline(0.1)
graph export cdfimplicitBondReturn.emf, replace
graph export cdfimplicitBondReturn.eps, replace
// CDF of implied stock returns; xline(0.3) marks the 30% stock cutoff
cumul implicitStockReturn if treatment == 0, gen(implicitStockReturnCDF_0)
cumul implicitStockReturn if treatment == 1, gen(implicitStockReturnCDF_1)
qui sum implicitStockReturnCDF_1 if treatment == 1
local segN = r(N)
qui sum implicitStockReturnCDF_0 if treatment == 0
local aggN = r(N)
twoway (scatter implicitStockReturnCDF_1 implicitStockReturn if treatment == 1, ///
	msymbol(smcircle)) ///
	(scatter implicitStockReturnCDF_0 implicitStockReturn if treatment == 0, ///
	msymbol(triangle_hollow)), ///
	legend(order(1 "Segregated (N=`segN')" 2 "Aggregated (N=`aggN')" )) ///
	xtitle("Implicit Annualized Stock Return") ytitle("Percentile") xline(0.3)
graph export cdfimplicitStockReturn.emf, replace
graph export cdfimplicitStockReturn.eps, replace

//
// Build reasonable estimation dummies
//
// Reasonable estimations criteria 1: Stocks outperform bonds
// (dummy only defined for respondents with both estimates)
generate stocksPerformBetterDummy = 0 if estimationbonds != . & estimationstocks != .
replace stocksPerformBetterDummy = 1 if implicitBondReturn < implicitStockReturn & estimationbonds != . & estimationstocks != .
sort treatment
by treatment: sum stocksPerformBetterDummy
sum allocation if treatment == 1 & threequestions == 1 & stocksPerformBetterDummy == 0
sum allocation if treatment == 1 & threequestions == 1 & stocksPerformBetterDummy == 1
sum allocation if treatment == 0 & threequestions == 1 & stocksPerformBetterDummy == 0
sum allocation if treatment == 0 & threequestions == 1 & stocksPerformBetterDummy == 1
ranksum allocation if treatment == 1 & threequestions == 1, by(stocksPerformBetterDummy)
ranksum allocation if treatment == 0 & threequestions == 1, by(stocksPerformBetterDummy)
ranksum stocksPerformBetterDummy if threequestions == 1, by(treatment)

// Reasonable estimations criteria 2: Absurd high returns for stocks or bonds
cumul relativeEstimation if treatment == 0, gen(relative0EstimationCDF)
cumul relativeEstimation if treatment == 1, gen(relative1EstimationCDF)
twoway (scatter relativeEstimation relative1EstimationCDF if treatment == 1, ///
	msymbol(circle_hollow)) ///
	(scatter relativeEstimation relative0EstimationCDF if treatment == 0, ///
	msymbol(triangle_hollow)), legend(order(1 "Segregated" 2 "Aggregated")) ///
	ytitle("ln(Est.Stocks/Est.Bonds)") xtitle("Percentile")
graph export cdfRelativeEstimation.emf, replace
// CDF of the implied return difference by treatment
cumul differenceInReturns if treatment == 0, gen(differenceInReturnsCDF_0)
cumul differenceInReturns if treatment == 1, gen(differenceInReturnsCDF_1)
qui sum differenceInReturnsCDF_1 if treatment == 1
local segN = r(N)
qui sum differenceInReturnsCDF_0 if treatment == 0
local aggN = r(N)
twoway (scatter differenceInReturnsCDF_1 differenceInReturns if treatment == 1, ///
	msymbol(smcircle)) ///
	(scatter differenceInReturnsCDF_0 differenceInReturns if treatment == 0, ///
	msymbol(triangle_hollow)), ///
	legend(order(1 "Segregated (N=`segN')" 2 "Aggregated (N=`aggN')" )) ///
	xtitle("Implicit Annualized Return Difference") ytitle("Percentile") xline(0) xlabel(-.3(.1).4)
graph export cdfReturnDifferences.emf, replace
graph export cdfReturnDifferences.eps, replace
// Flag implausibly high implied returns (> ~30% p.a. for stocks,
// > ~10% p.a. for bonds; the .x999... literals avoid floating-point
// edge cases at exactly 0.3 / 0.1)
generate absurdHighDummy = 0 if estimationbonds != . & estimationstocks != .
replace absurdHighDummy = 1 if implicitStockReturn > 0.299999999 & estimationbonds != . & estimationstocks != .
replace absurdHighDummy = 1 if implicitBondReturn > 0.099999999 & estimationbonds != . & estimationstocks != .
sort treatment
by treatment: sum absurdHighDummy

//
// Build dummy for reasonable estimate
//
// "reasonable" = stocks outperform bonds AND no absurdly high return
generate goodEstimateDummy = 0 if estimationbonds != . & estimationstocks != .
replace goodEstimateDummy = 1 if absurdHighDummy == 0 & stocksPerformBetterDummy == 1
sort treatment
by treatment: sum goodEstimateDummy
ranksum goodEstimateDummy, by(treatment)
ttest goodEstimateDummy, by(treatment)
sort treatment
by treatment: sum allocation if goodEstimateDummy == 0
by treatment: sum allocation if goodEstimateDummy == 1
by treatment: sum allocation if absurdHighDummy == 1 & missingEstimations == 0
// allocations in the four (stock cutoff) x (bond cutoff) quadrants
by treatment: sum allocation if implicitStockReturn > 0.299999999 & implicitBondReturn > 0.099999999 & estimationbonds != . & estimationstocks != .
by treatment: sum allocation if implicitStockReturn > 0.299999999 & implicitBondReturn < 0.099999999 & estimationbonds != . & estimationstocks != .
by treatment: sum allocation if implicitStockReturn < 0.299999999 & implicitBondReturn > 0.099999999 & estimationbonds != . & estimationstocks != .
by treatment: sum allocation if implicitStockReturn < 0.299999999 & implicitBondReturn < 0.099999999 & estimationbonds != . & estimationstocks != .
by treatment: sum differenceInReturns if absurdHighDummy == 1
by treatment: sum differenceInReturns if goodEstimateDummy == 1
by treatment: sum differenceInReturns if stocksPerformBetterDummy == 0

//
// Significant difference in both conditions?
//
// Does the allocation differ by estimate quality within each treatment?
ranksum allocation if treatment == 1 & missingEstimations == 0, by(goodEstimateDummy)
ttest allocation if treatment == 1 & missingEstimations == 0, by(goodEstimateDummy)
ranksum allocation if treatment == 0 & missingEstimations == 0, by(goodEstimateDummy)
ttest allocation if treatment == 0 & missingEstimations == 0, by(goodEstimateDummy)

//
// Regression results; two-sided tobit
//
// allocation is censored at 0 and 100, hence the two-limit tobit
generate interaction = goodEstimateDummy * treatment
tobit allocation goodEstimateDummy treatment interaction, ll(0) ul(100)

//
// Mediation analysis
//
regress allocation treatment
regress allocation treatment if threequestions == 1 & allocation != . & goodEstimateDummy != .
// alpha path: treatment -> mediator (goodEstimateDummy)
regress goodEstimateDummy treatment if allocation != . & goodEstimateDummy != .
local alphaC = _b[treatment]
local alpha_var = _se[treatment]^2
// beta path: mediator -> outcome, controlling for treatment
regress allocation goodEstimateDummy treatment if allocation != . & goodEstimateDummy != .
local betaC = _b[goodEstimateDummy]
local beta_var = _se[goodEstimateDummy]^2
di "`alphaC' `alpha_var' " `alpha_var'^0.5 " `betaC' `beta_var' "`beta_var'^0.5
// Sobel test: indirect effect alpha*beta with first-order delta-method SE
display "Coeff: " `alphaC' * `betaC'
display " SE : " sqrt((`alphaC')^2 * `beta_var' + (`betaC')^2 * `alpha_var')
display " p : " (1 - normal(abs(`alphaC' * `betaC' / ((`alphaC')^2 * `beta_var' + (`betaC')^2 * `alpha_var') ^ 0.5))) * 2
// Sobel and bootstrap (user-written sgmediation command)
sgmediation allocation if allocation != . & goodEstimateDummy != ., mv(goodEstimateDummy) iv(treatment)
bootstrap r(ind_eff) r(dir_eff) if allocation != . & goodEstimateDummy != ., reps(5000) level(95): sgmediation allocation, iv(treatment) mv(goodEstimateDummy)
matrix coeff = e(b)
matrix se = e(se)
di (1 - normal(abs(coeff[1,1]/se[1,1]))) * 2  // normal-approximation p for the indirect effect
estat bootstrap, percentile
// Robustnesscheck: Use binary_mediation, see http://www.ats.ucla.edu/stat/stata/faq/binary_mediation.htm
binary_mediation if allocation != . & goodEstimateDummy != ., dv(allocation) mv(goodEstimateDummy) iv(treatment) diagram
bootstrap r(indir_1) r(tot_ind) r(dir_eff) r(tot_eff) if allocation != . & goodEstimateDummy != ., reps(5000): binary_mediation, dv(allocation) mv(goodEstimateDummy) iv(treatment) diagram
estat bootstrap, percentile

// Allocation by goodEstimateDummy
capture label drop dummyValueLabels
label define dummyValueLabels 0 "Unreasonable Estimates" 1 "Reasonable Estimates"
label value goodEstimateDummy dummyValueLabels
graph bar allocation if missingEstimations == 0, over(treatment) over(goodEstimateDummy) ytitle("Allocation to Stocks (in %)") blabel(bar, format(%9.1f))
graph export allocation.emf, replace as(emf)
graph export allocation.eps, replace

// create matrix to store results from robustness checks
// 4 stock cutoffs x 5 bond cutoffs = 20 rows; 26 result columns
// NOTE(review): columns 23 and 24 are never written -- possibly the intended
// slots for the non-parametric tests below that currently discard their results
matrix matResults = J(20,26,.)
local counter = 1
// sensitivity analysis: rebuild the "reasonable estimate" dummy for a grid of
// cutoffs (the .x999... literals guard against floating-point drift in forvalues)
forvalues cutoffStock = 0.1999999999(0.05)0.3499999999999 {
	forvalues cutoffBond = 0.0799999999(0.01)0.1199999999999 {
		di "-----------------------------------"
		di "Stock Return : `cutoffStock'"
		di "Bond Return : `cutoffBond'"
		di "Counter: `counter'"
		di "-----------------------------------"
		qui {
			// just an id in the first column
			matrix matResults[`counter',1] = `cutoffStock'*1000 + `cutoffBond'
			// Reasonable estimations criteria 2: Absurd high or low returns for stocks relative to bonds
			drop absurdHighDummy
			generate absurdHighDummy = 0 if estimationbonds != . & estimationstocks != .
			replace absurdHighDummy = 1 if implicitStockReturn > `cutoffStock' & estimationbonds != . & estimationstocks != .
			replace absurdHighDummy = 1 if implicitBondReturn > `cutoffBond' & estimationbonds != . & estimationstocks != .
			sort treatment
			sum absurdHighDummy if treatment == 0
			matrix matResults[`counter',2] = r(mean)
			sum absurdHighDummy if treatment == 1
			matrix matResults[`counter',3] = r(mean)
			//
			// Build dummy for reasonable estimate
			//
			drop goodEstimateDummy
			generate goodEstimateDummy = 0 if estimationbonds != . & estimationstocks != .
			replace goodEstimateDummy = 1 if absurdHighDummy == 0 & stocksPerformBetterDummy == 1
			sort treatment
			sum goodEstimateDummy if treatment == 0
			matrix matResults[`counter',4] = r(mean)
			sum goodEstimateDummy if treatment == 1
			matrix matResults[`counter',5] = r(mean)
			ranksum goodEstimateDummy, by(treatment)
			matrix matResults[`counter',6] = (1 - normal(abs(r(z)))) * 2
			ttest goodEstimateDummy, by(treatment)
			// NOTE(review): r(p_u) is the upper one-sided p-value; confirm the
			// two-sided r(p) was not intended here
			matrix matResults[`counter',7] = r(p_u)
			//
			// Difference for people with reasonable estimate in the aggregated condition?
			//
			sum allocation if goodEstimateDummy == 0 & treatment == 0
			matrix matResults[`counter',8] = r(mean)
			sum allocation if goodEstimateDummy == 0 & treatment == 1
			matrix matResults[`counter',9] = r(mean)
			sum allocation if goodEstimateDummy == 1 & treatment == 0
			matrix matResults[`counter',10] = r(mean)
			sum allocation if goodEstimateDummy == 1 & treatment == 1
			matrix matResults[`counter',11] = r(mean)
			ranksum allocation if goodEstimateDummy == 1, by(treatment)
			// FIX: was "if goodEstimateDummy" -- Stata evaluates missing as true,
			// so respondents with a missing dummy were pooled into the sample;
			// restrict to == 1 to mirror the ranksum directly above
			ttest allocation if goodEstimateDummy == 1, by(treatment)
			//
			// Significant difference for unsophisticated persons in both conditions?
			//
			ranksum allocation if treatment == 1, by(goodEstimateDummy)
			ttest allocation if treatment == 1, by(goodEstimateDummy)
			ranksum allocation if treatment == 0, by(goodEstimateDummy)
			ttest allocation if treatment == 0, by(goodEstimateDummy)
			//
			// Mediation analysis
			//
			// tau: total effect of treatment on allocation
			regress allocation treatment if threequestions == 1 & allocation != . & goodEstimateDummy != .
			matrix coeff = e(b)
			local tau = coeff[1,1]
			matrix matResults[`counter',14] = `tau'
			// alpha path: treatment -> mediator
			regress goodEstimateDummy treatment if allocation != . & goodEstimateDummy != .
			matrix coeff = e(b)
			matrix varco = e(V)
			local alphaC = coeff[1,1]
			local alpha_var = _se[treatment]^2
			matrix matResults[`counter',12] = `alphaC'
			// beta path: mediator -> outcome controlling for treatment;
			// tauPrime is the direct effect of treatment
			regress allocation goodEstimateDummy treatment if allocation != . & goodEstimateDummy != .
			matrix coeff = e(b)
			matrix varco = e(V)
			local betaC = coeff[1,1]
			local beta_var = _se[goodEstimateDummy]^2
			local tauPrime = coeff[1,2]
			matrix matResults[`counter',13] = `betaC'
			matrix matResults[`counter',15] = `tauPrime'
			// Sobel test for the indirect effect
			matrix matResults[`counter',16] = `alphaC' * `betaC'
			matrix matResults[`counter',17] = sqrt((`alphaC')^2 * `beta_var' + (`betaC')^2 * `alpha_var')
			local z = `alphaC' * `betaC' / sqrt((`alphaC')^2 * `beta_var' + (`betaC')^2 * `alpha_var')
			matrix matResults[`counter',18] = `z'
			matrix matResults[`counter',19] = (1 - normal(abs(`z'))) * 2
			// bootstrap p-values
			bootstrap r(ind_eff) r(dir_eff) if allocation != . & goodEstimateDummy != ., reps(5000) level(95): sgmediation allocation, iv(treatment) mv(goodEstimateDummy)
			matrix coeff = e(b)
			matrix se = e(se)
			matrix matResults[`counter',20] = (1 - normal(abs(coeff[1,1]/se[1,1]))) * 2
			// bootstrapped confidence interval
			estat bootstrap, percentile
			matrix confidenceInt = e(ci_percentile)
			matrix matResults[`counter',21] = confidenceInt[1,1]
			matrix matResults[`counter',22] = confidenceInt[2,1]
			// Robustnesscheck: Use binary_mediation, see http://www.ats.ucla.edu/stat/stata/faq/binary_mediation.htm
			binary_mediation if allocation != . & goodEstimateDummy != ., dv(allocation) mv(goodEstimateDummy) iv(treatment) diagram
			bootstrap r(indir_1) r(tot_ind) r(dir_eff) r(tot_eff) if allocation != . & goodEstimateDummy != ., reps(5000): binary_mediation, dv(allocation) mv(goodEstimateDummy) iv(treatment) diagram
			estat bootstrap, percentile
			matrix confidenceInt = e(ci_percentile)
			matrix matResults[`counter',25] = confidenceInt[1,1]
			matrix matResults[`counter',26] = confidenceInt[2,1]
			local counter = `counter' + 1
		}
	}
}
// create matrix to store results from robustness checks
// second grid: 3 stock cutoffs x 3 bond cutoffs = 9 rows
matrix matResultsII = J(9,26,.)
local counter = 1
// now sensitivity analysis with the two different thresholds
// (segregated condition keeps the paper's 30%/10% cutoffs; only the
// aggregated condition's cutoffs vary over the grid below)
forvalues cutoffStock = 0.1399(0.01)0.15999 {
	forvalues cutoffBond = 0.0499(0.01)0.06999 {
		di "-----------------------------------"
		di "Stock Return : `cutoffStock'"
		di "Bond Return : `cutoffBond'"
		di "Counter: `counter'"
		di "-----------------------------------"
		qui {
			// running id in the first column
			matrix matResultsII[`counter',1] = `counter'
			// Reasonable estimations criteria 2: Absurd high or low returns for stocks relative to bonds
			drop absurdHighDummy
			generate absurdHighDummy = 0 if estimationbonds != . & estimationstocks != .
			// fixed cutoffs in the segregated condition ...
			replace absurdHighDummy = 1 if implicitStockReturn > 0.2999 & estimationbonds != . & estimationstocks != . & treatment == 1
			replace absurdHighDummy = 1 if implicitBondReturn > 0.0999 & estimationbonds != . & estimationstocks != . & treatment == 1
			// ... grid cutoffs in the aggregated condition
			replace absurdHighDummy = 1 if implicitStockReturn > `cutoffStock' & estimationbonds != . & estimationstocks != . & treatment == 0
			replace absurdHighDummy = 1 if implicitBondReturn > `cutoffBond' & estimationbonds != . & estimationstocks != . & treatment == 0
			sort treatment
			sum absurdHighDummy if treatment == 0
			matrix matResultsII[`counter',2] = r(mean)
			sum absurdHighDummy if treatment == 1
			matrix matResultsII[`counter',3] = r(mean)
			//
			// Build dummy for reasonable estimate (H3)
			//
			drop goodEstimateDummy
			generate goodEstimateDummy = 0 if estimationbonds != . & estimationstocks != .
			replace goodEstimateDummy = 1 if absurdHighDummy == 0 & stocksPerformBetterDummy == 1
			sort treatment
			sum goodEstimateDummy if treatment == 0
			matrix matResultsII[`counter',4] = r(mean)
			sum goodEstimateDummy if treatment == 1
			matrix matResultsII[`counter',5] = r(mean)
			ranksum goodEstimateDummy, by(treatment)
			matrix matResultsII[`counter',6] = (1 - normal(abs(r(z)))) * 2
			ttest goodEstimateDummy, by(treatment)
			// NOTE(review): r(p_u) is the upper one-sided p-value; confirm the
			// two-sided r(p) was not intended here
			matrix matResultsII[`counter',7] = r(p_u)
			//
			// Difference for people with reasonable estimate in the aggregated condition? (H4)
			//
			sum allocation if goodEstimateDummy == 0 & treatment == 0
			matrix matResultsII[`counter',8] = r(mean)
			sum allocation if goodEstimateDummy == 0 & treatment == 1
			matrix matResultsII[`counter',9] = r(mean)
			sum allocation if goodEstimateDummy == 1 & treatment == 0
			matrix matResultsII[`counter',10] = r(mean)
			sum allocation if goodEstimateDummy == 1 & treatment == 1
			matrix matResultsII[`counter',11] = r(mean)
			ranksum allocation if goodEstimateDummy == 1, by(treatment)
			// FIX: was "if goodEstimateDummy" -- Stata evaluates missing as true,
			// so respondents with a missing dummy were pooled into the sample;
			// restrict to == 1 to mirror the ranksum directly above
			ttest allocation if goodEstimateDummy == 1, by(treatment)
			//
			// Significant difference for unsophisticated persons in both conditions? (H5)
			//
			ranksum allocation if treatment == 1, by(goodEstimateDummy)
			ttest allocation if treatment == 1, by(goodEstimateDummy)
			ranksum allocation if treatment == 0, by(goodEstimateDummy)
			ttest allocation if treatment == 0, by(goodEstimateDummy)
			//
			// Mediation analysis
			//
			// tau: total effect of treatment on allocation
			regress allocation treatment if threequestions == 1 & allocation != . & goodEstimateDummy != .
			matrix coeff = e(b)
			local tau = coeff[1,1]
			matrix matResultsII[`counter',14] = `tau'
			// alpha path: treatment -> mediator
			regress goodEstimateDummy treatment if allocation != . & goodEstimateDummy != .
			matrix coeff = e(b)
			matrix varco = e(V)
			local alphaC = coeff[1,1]
			local alpha_var = _se[treatment]^2
			matrix matResultsII[`counter',12] = `alphaC'
			// beta path: mediator -> outcome controlling for treatment;
			// tauPrime is the direct effect of treatment
			regress allocation goodEstimateDummy treatment if allocation != . & goodEstimateDummy != .
			matrix coeff = e(b)
			matrix varco = e(V)
			local betaC = coeff[1,1]
			local beta_var = _se[goodEstimateDummy]^2
			local tauPrime = coeff[1,2]
			matrix matResultsII[`counter',13] = `betaC'
			matrix matResultsII[`counter',15] = `tauPrime'
			// Sobel test for the indirect effect
			matrix matResultsII[`counter',16] = `alphaC' * `betaC'
			matrix matResultsII[`counter',17] = sqrt((`alphaC')^2 * `beta_var' + (`betaC')^2 * `alpha_var')
			local z = `alphaC' * `betaC' / sqrt((`alphaC')^2 * `beta_var' + (`betaC')^2 * `alpha_var')
			matrix matResultsII[`counter',18] = `z'
			matrix matResultsII[`counter',19] = (1 - normal(abs(`z'))) * 2
			// bootstrap p-values
			bootstrap r(ind_eff) r(dir_eff) if allocation != . & goodEstimateDummy != ., reps(5000) level(95): sgmediation allocation, iv(treatment) mv(goodEstimateDummy)
			matrix coeff = e(b)
			matrix se = e(se)
			matrix matResultsII[`counter',20] = (1 - normal(abs(coeff[1,1]/se[1,1]))) * 2
			// bootstrapped confidence interval
			estat bootstrap, percentile
			matrix confidenceInt = e(ci_percentile)
			matrix matResultsII[`counter',21] = confidenceInt[1,1]
			matrix matResultsII[`counter',22] = confidenceInt[2,1]
			// Robustnesscheck: Use binary_mediation, see http://www.ats.ucla.edu/stat/stata/faq/binary_mediation.htm
			binary_mediation if allocation != . & goodEstimateDummy != ., dv(allocation) mv(goodEstimateDummy) iv(treatment) diagram
			bootstrap r(indir_1) r(tot_ind) r(dir_eff) r(tot_eff) if allocation != . & goodEstimateDummy != ., reps(5000): binary_mediation, dv(allocation) mv(goodEstimateDummy) iv(treatment) diagram
			estat bootstrap, percentile
			matrix confidenceInt = e(ci_percentile)
			matrix matResultsII[`counter',25] = confidenceInt[1,1]
			matrix matResultsII[`counter',26] = confidenceInt[2,1]
			local counter = `counter' + 1
		}
	}
}

// dump both sensitivity grids to the log and to Excel-readable XML
matrix list matResults
xml_tab matResults, save("sensitivityAnalysis.xml") replace
matrix list matResultsII
xml_tab matResultsII, save("sensitivityAnalysisII.xml") replace
log close