# 01 moderation.R
library(dplyr)
library(datawizard)
library(parameters)
library(performance) # for `compare_performance()`
library(bayestestR) # for `bayesfactor_models()`
library(emmeans) # for simple slope analysis
library(ggeffects) # for model plotting
parental_iris <- read.csv("parental_iris.csv")
glimpse(parental_iris)
head(parental_iris)
# Last time we saw that R doesn't need you to make dummy variables - it can
# deal with factors just fine. Today we will see that this is also true for
# moderation and interactions!
# (I will not be showing how to do these the long way...)
# Categorical moderator ---------------------------------------------------
# Let's prep the data for modeling:
parental_iris <- parental_iris |>
  mutate(
    attachment = relevel(factor(attachment), ref = "secure"),
    involvement_c = center(parental_involvement)
  )
# Here we don't want z-scores (or maybe we do?) - we just want to center the
# variable around 0.
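# (If you're curious, you can peek at the dummy coding R will build for
# `attachment`, and verify that the centering worked:)
contrasts(parental_iris$attachment)
mean(parental_iris$involvement_c) # should be ~0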
## 1. Fit the model(s) ----------------------------------------------------
m_additive <- lm(child_satisfaction ~ involvement_c + attachment,
                 data = parental_iris)
# all you need is to tell R to add an interaction!
m_moderation <- lm(child_satisfaction ~ involvement_c + attachment + involvement_c:attachment,
                   data = parental_iris)
# or use `*` -- A * B = A + B + A:B
m_moderation <- lm(child_satisfaction ~ involvement_c * attachment,
                   data = parental_iris)
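# (Under the hood, R expands the interaction into product columns - you can
# see the design matrix it builds with:)
head(model.matrix(m_moderation))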
# Does the model with an interaction have a better fit?
compare_performance(m_additive, m_moderation)
anova(m_additive, m_moderation)
bayesfactor_models(m_moderation, denominator = m_additive)
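# (Here a BF > 1 favors the moderation model over the additive model, which we
# set as the denominator.)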
# Look at the parameters. What do they mean?
model_parameters(m_moderation)
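# (A reminder about treatment coding: with "secure" as the reference level, the
# `involvement_c` coefficient is the simple slope for the secure group, the
# `attachment` dummies are group differences at involvement_c = 0, and the
# interaction terms are the differences in slopes relative to the secure
# group.)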
## 2. Explore the model --------------------------------------------------
# simple slope analysis!
# We don't (just) care about the model's parameters - what we usually care about
# are the different estimates the model can give us - the simple (conditional)
# slopes.
emtrends(m_moderation, ~ attachment, var = "involvement_c",
         infer = TRUE)
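# (`infer = TRUE` asks emmeans to add confidence intervals and t-tests to the
# output.)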
# we can also use contrasts here to COMPARE slopes:
emtrends(m_moderation, ~ attachment, var = "involvement_c") |>
  contrast(method = "pairwise",
           infer = TRUE)
## Plot
ggemmeans(m_moderation, c("involvement_c", "attachment")) |>
  plot(add.data = TRUE, jitter = 0)
# Continuous moderator ----------------------------------------------------
## Prep the data
parental_iris <- parental_iris |>
  mutate(
    strictness_c = center(parental_strictness)
  )
## 1. Fit the model(s) ----------------------------------------------------
m_additive <- lm(child_satisfaction ~ involvement_c + strictness_c,
                 data = parental_iris)
m_moderation <- lm(child_satisfaction ~ involvement_c * strictness_c,
                   data = parental_iris)
# compare models:
compare_performance(m_additive, m_moderation)
anova(m_additive, m_moderation)
bayesfactor_models(m_additive, m_moderation)
# Look at the parameters. What do they mean?
model_parameters(m_moderation)
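# (Because both predictors are centered, each first-order coefficient is the
# simple slope of that predictor when the other is at its mean (0), and the
# interaction is how much the `involvement_c` slope changes for each 1-unit
# increase in `strictness_c`.)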
## 2. Explore the model --------------------------------------------------
# simple slope analysis!
emtrends(m_moderation, ~ strictness_c, var = "involvement_c")
# Unfortunately, emmeans/emtrends reduces covariates (numerical predictors) to
# their mean! If we want to probe the moderation at multiple other values, we
# have two ways to do so:
### A. Probe with a function --------
emtrends(m_moderation, ~ strictness_c, "involvement_c",
         # Reduce `strictness_c` to its mean ± SD:
         cov.reduce = list(strictness_c = mean_sd),
         infer = TRUE)
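# (`mean_sd()` comes from datawizard - it returns the mean - SD, the mean, and
# the mean + SD, so we get a simple slope at each of these three values.)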
ggemmeans(m_moderation, c("involvement_c", "strictness_c [meansd]")) |>
  plot(add.data = TRUE, jitter = 0)
### B. Pick-a-point --------
# We can also ask for specific values at which to probe:
emtrends(m_moderation, ~ strictness_c, "involvement_c",
         # Probe when `strictness_c` is -4 or 78:
         at = list(strictness_c = c(-4, 78)),
         infer = TRUE)
ggemmeans(m_moderation, c("involvement_c", "strictness_c [-4, 78]")) |>
  plot(add.data = TRUE, jitter = 0)
# Once again, note that in R, model fitting and hypothesis testing are not as
# closely knit as they are in SPSS. For simple slope analysis in SPSS, we
# would re-fit a new model for each simple slope; in R, we instead (1) fit one
# model, then (2) test the simple slopes of interest.
# Categorical by Categorical ----------------------------------------------
# What about a categorical variable moderating the effect of another categorical
# variable? We can do that just the same! We will see more of this in a few
# weeks, when we talk about ANOVAs...
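# A minimal sketch (hypothetical - it assumes a second factor such as
# `parenting_style`, which is not in this dataset):
# m_cat_mod <- lm(child_satisfaction ~ attachment * parenting_style,
#                 data = parental_iris)
# # Compare attachment groups within each level of the moderator:
# emmeans(m_cat_mod, ~ attachment | parenting_style) |>
#   contrast(method = "pairwise", infer = TRUE)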
# Exercise ----------------------------------------------------------------
# 1. The `ggeffects::johnson_neyman()` function can be used to produce a
# Johnson-Neyman plot. What does it do? (Hint: read the axis titles.)
jn <- ggemmeans(m_moderation, c("involvement_c", "strictness_c")) |>
  johnson_neyman()
jn
plot(jn)
# 2. Fit the same moderation model with the original *non-centered* predictors.
# How does it differ from the moderation model with the centered predictors?
# A. How do the parameters differ? Interpret them.
# B. How does the model fit (R^2) differ?
# C. How does the simple slopes analysis differ?