prop.test(x, n, p)
binom.test(x, n, p)
p = 1 / (number of levels)  # expected proportion under an equal-probability null hypothesis
Two-sample proportions: prop.test(x = c(x1, x2), n = c(n1, n2)), or build a 2x2 count table first — tbl <- table(data$X1, data$X2); prop.test(tbl)
z.test zsum.test
t.test tsum.test
wilcox.test(y ~ x, data = ?)  wilcox_test(y ~ x, data = ?)  # y numeric, x categorical (factor); wilcox_test is the coin-package version
oneway_test(y(NUM)~x(CAT/Factor))
shapiro.test lillie.test
chisq.test(contingency_table) or chisq.test(c(x1, x2), p = p): p-value < 0.05 — reject H0 of 'no relationship'. Because each trial in a binomial distribution must be independent, check independence first before using binom.test. Small expected counts: fisher.test(xtabs(~ X1 + X2))
p.adjust(c(df1$p.value, df2$p.value), method='holm')
var.test()
model <- lm(data = ?, y ~ x) anova(model) summary(model) anova <- aov(data = ?, y ~ x) TukeyHSD(anova) plot(TukeyHSD(anova)) confint(anova)
lillie.test(residuals(model))  # p < 0.1 = not normal; shapiro.test(residuals(model))  # p < 0.1 = not normal; fligner.test(y ~ x, data = ?)  # p > 0.05 = equal variances; leveneTest(y ~ x, data = ?)  # p > 0.05 = equal variances (car package)
kruskal.test(y ~ x, data = ?)  kruskalmc(y ~ x, data = ?)  oneway_test(y ~ x)  # the last also works here since it is a non-parametric (permutation) method
densityplot(), qqnorm(), qqline(), str(); subset(dataframe, subset = conditions); xtabs(~ X, data = dataframe) produces a contingency table; table(dataframe$X) also produces a contingency table; runif(1000) generates a uniform distribution; rnorm(1000) generates a normal distribution