> (city_mean <- as.vector(tapply(cl.score,city,mean)))
[1] 19.75 14.25 11.00
> (inst1_mean <- as.vector(tapply(cl.score,inst1,mean)))
[1] 27.0 12.5  8.5 20.0 18.5  3.5
> (all_mean <- mean(cl.score))
[1] 15
>
> stripchart(cl.score ~ city,pch=c(1,2)[inst])
>
> training.mod1 <- aov(cl.score ~ city + city/inst1)
> summary(training.mod1)
            Df Sum Sq Mean Sq F value   Pr(>F)
city         2  156.5   78.25  11.179 0.009473 **
city:inst1   3  567.5  189.17  27.024 0.000697 ***
Residuals    6   42.0    7.00
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> anova(training.mod1)
Analysis of Variance Table

Response: cl.score
           Df Sum Sq Mean Sq F value   Pr(>F)
city        2  156.5   78.25  11.179 0.009473 **
city:inst1  3  567.5  189.17  27.024 0.000697 ***
Residuals   6   42.0    7.00
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
>
> (sse <- deviance(training.mod1))
[1] 42
> (dfe <- df.residual(training.mod1))
[1] 6
> (mse <- sse/dfe)
[1] 7
>
> e <- residuals(training.mod1)
>
> qqnorm(e); qqline(e)
>
> stripchart(cl.score ~ e)
>
> TukeyHSD(training.mod1,"city")
  Tukey multiple comparisons of means
    95% family-wise confidence level

Fit: aov(formula = cl.score ~ city + city/inst1)

$city
     diff        lwr        upr     p adj
2-1 -5.50 -11.240216  0.2402158 0.0586129
3-1 -8.75 -14.490216 -3.0097842 0.0081224
3-2 -3.25  -8.990216  2.4902158 0.2676596

>
> ### Bonferroni 90% CI's comparing each pair of instructors within each city - 3 comparisons
>
> Bon_MSD <- qt(1-(.10/(2*3)),dfe)*sqrt(2*mse/2)   #### Each instructor is observed n=2 times
>
> ## Instructor 1 vs Instructor 2 in City 1
> ((inst1_mean[1] - inst1_mean[2]) + c(-Bon_MSD,Bon_MSD))
[1]  7.226679 21.773321
>
> ## Instructor 1 vs Instructor 2 in City 2
> ((inst1_mean[3] - inst1_mean[4]) + c(-Bon_MSD,Bon_MSD))
[1] -18.773321  -4.226679
>
> ## Instructor 1 vs Instructor 2 in City 3
> ((inst1_mean[5] - inst1_mean[6]) + c(-Bon_MSD,Bon_MSD))
[1]  7.726679 22.273321
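
In the model formula, city/inst1 expands to city + city:inst1, so aov(cl.score ~ city + city/inst1) fits instructors nested within cities. The degrees of freedom in the tables above follow directly from the layout of a = 3 cities, b = 2 instructors per city, and n = 2 scores per instructor. A minimal sketch of that bookkeeping (only these design counts are assumed; they are read off the output above):

## Degrees of freedom implied by the nested layout:
## a = 3 cities, b = 2 instructors per city, n = 2 scores per instructor
a <- 3; b <- 2; n <- 2
c(city = a - 1, "city:inst1" = a*(b - 1), Residuals = a*b*(n - 1))
## gives 2, 3, 6 -- matching the df column in the ANOVA tables above

## Since city/inst1 expands to city + city:inst1, the same model could be
## written as:  aov(cl.score ~ city + city:inst1)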
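
The Bonferroni margin Bon_MSD used above is t with dfE degrees of freedom at level 1 - alpha/(2g), times sqrt(MSE*(1/n + 1/n)), with g = 3 comparisons, alpha = 0.10, MSE = 7 on dfE = 6, and n = 2 scores per instructor. A minimal sketch recomputing it from those quantities (all numeric values are taken from the output above; t_crit is just a name introduced here for the adjusted critical value):

## Recompute the Bonferroni minimum significant difference from the
## reported quantities: MSE = 7, dfE = 6, n = 2, g = 3 comparisons, 90% family-wise level
mse <- 7; dfe <- 6; n <- 2; g <- 3; alpha <- 0.10
t_crit  <- qt(1 - alpha/(2*g), dfe)         # Bonferroni-adjusted t critical value
Bon_MSD <- t_crit * sqrt(mse*(1/n + 1/n))   # about 7.27, as in the intervals above

## e.g. Instructor 1 vs Instructor 2 in City 1: 27.0 - 12.5 = 14.5 +/- Bon_MSD
(27.0 - 12.5) + c(-Bon_MSD, Bon_MSD)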