Last updated: 2022-01-20
Knit directory: Padgett-Dissertation/
This reproducible R Markdown analysis was created with workflowr (version 1.6.2). The Checks tab describes the reproducibility checks that were applied when the results were created. The Past versions tab lists the development history.
Great job! The global environment was empty. Objects defined in the global environment can affect the analysis in your R Markdown file in unknown ways. For reproducibility it's best to always run the code in an empty environment.
The command set.seed(20210401) was run prior to running the code in the R Markdown file. Setting a seed ensures that any results that rely on randomness, e.g. subsampling or permutations, are reproducible.
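As a quick illustration of what the seed buys (not part of the analysis itself), resetting the seed before a second set of random draws reproduces the first set exactly:

set.seed(20210401)
draw1 <- rnorm(3)
set.seed(20210401)
draw2 <- rnorm(3)
identical(draw1, draw2)  # TRUE: same seed, same draws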
Great job! Recording the operating system, R version, and package versions is critical for reproducibility.
To ensure reproducibility of the results, delete the cache directory study4_model2_results_cache and re-run the analysis. To have workflowr automatically delete the cache directory prior to building the file, set delete_cache = TRUE when running wflow_build() or wflow_publish().
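A minimal sketch of that cleanup, assuming this page is built from analysis/study4_model2_results.Rmd (the path is inferred from the cache directory name and may differ):

# delete the knitr cache by hand ...
unlink("study4_model2_results_cache", recursive = TRUE)
# ... or let workflowr delete it while rebuilding the page
workflowr::wflow_build("analysis/study4_model2_results.Rmd", delete_cache = TRUE)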
Great job! Using relative paths to the files within your workflowr project makes it easier to run your code on other machines.
Tracking code development and connecting the code version to the results is critical for reproducibility. To start using Git, open the Terminal and type git init in your project directory.
This project is not being versioned with Git. To obtain the full reproducibility benefits of using workflowr, please see ?wflow_start.
# Load packages & utility functions
source("code/load_packages.R")
source("code/load_utility_functions.R")
# environment options
options(scipen = 999, digits=3)
# Load diffIRT package with data
library(diffIRT)
data("extraversion")
mydata <- na.omit(extraversion)
# separate data then recombine
d1 <- mydata %>%
  as.data.frame() %>%
  select(contains("X")) %>%
  mutate(id = 1:n()) %>%
  pivot_longer(
    cols = contains("X"),
    names_to = "item",
    values_to = "Response"
  ) %>%
  mutate(
    item = ifelse(nchar(item) == 4, substr(item, 3, 3), substr(item, 3, 4))
  )
d2 <- mydata %>%
  as.data.frame() %>%
  select(contains("T")) %>%
  mutate(id = 1:n()) %>%
  pivot_longer(
    cols = starts_with("T"),
    names_to = "item",
    values_to = "Time"
  ) %>%
  mutate(
    item = ifelse(nchar(item) == 4, substr(item, 3, 3), substr(item, 3, 4))
  )
dat <- left_join(d1, d2)
Joining, by = c("id", "item")
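As an aside, the two-step reshape above can be collapsed into a single pivot_longer() call. The following is only a sketch, assuming the raw column names follow the X[i]/T[i] pattern used by diffIRT::extraversion; it is not the code used for the results below:

dat_alt <- mydata %>%
  as.data.frame() %>%
  mutate(id = 1:n()) %>%
  pivot_longer(
    cols = -id,
    names_to = c(".value", "item"),       # ".value" splits X[.] and T[.] into separate columns
    names_pattern = "([XT])\\[(\\d+)\\]"  # capture the prefix (X or T) and the item number
  ) %>%
  rename(Response = X, Time = `T`)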
dat_sum <- dat %>%
  select(item, Response, Time) %>%
  group_by(item) %>%
  summarize(
    M1 = mean(Response, na.rm = T),
    Mt = mean(Time, na.rm = T),
    SDt = sd(Time, na.rm = T),
    Mlogt = mean(log(Time), na.rm = T)
  )
colnames(dat_sum) <-
c(
"Item",
"Proportion Endorsed",
"Mean Response Time",
"SD Response Time",
"Mean Log Response Time"
)
kable(dat_sum, format = "html", digits = 3) %>%
kable_styling(full_width = T)
Item | Proportion Endorsed | Mean Response Time | SD Response Time | Mean Log Response Time |
---|---|---|---|---|
1 | 0.739 | 1.488 | 0.805 | 0.288 |
10 | 0.866 | 0.979 | 0.520 | -0.115 |
2 | 0.535 | 1.354 | 0.648 | 0.208 |
3 | 0.852 | 1.115 | 0.632 | 0.002 |
4 | 0.923 | 1.001 | 0.664 | -0.114 |
5 | 0.542 | 1.301 | 0.706 | 0.163 |
6 | 0.901 | 1.255 | 0.682 | 0.119 |
7 | 0.944 | 1.143 | 0.546 | 0.054 |
8 | 0.965 | 1.067 | 0.575 | -0.030 |
9 | 0.824 | 1.728 | 0.745 | 0.463 |
# covariance among items
kable(cov(mydata[,colnames(mydata) %like% "X"]), digits = 3) %>%
kable_styling(full_width = T)
| | X[1] | X[2] | X[3] | X[4] | X[5] | X[6] | X[7] | X[8] | X[9] | X[10] |
---|---|---|---|---|---|---|---|---|---|---|
X[1] | 0.194 | -0.001 | 0.039 | 0.029 | 0.000 | 0.002 | 0.014 | 0.005 | 0.011 | 0.015 |
X[2] | -0.001 | 0.251 | 0.023 | 0.006 | 0.077 | 0.011 | 0.002 | 0.012 | 0.031 | 0.030 |
X[3] | 0.039 | 0.023 | 0.127 | 0.038 | 0.024 | 0.028 | 0.020 | 0.016 | 0.016 | 0.051 |
X[4] | 0.029 | 0.006 | 0.038 | 0.072 | 0.014 | 0.006 | 0.017 | 0.019 | 0.029 | 0.025 |
X[5] | 0.000 | 0.077 | 0.024 | 0.014 | 0.250 | 0.004 | 0.017 | 0.005 | 0.032 | 0.031 |
X[6] | 0.002 | 0.011 | 0.028 | 0.006 | 0.004 | 0.090 | 0.009 | 0.011 | 0.004 | 0.015 |
X[7] | 0.014 | 0.002 | 0.020 | 0.017 | 0.017 | 0.009 | 0.054 | 0.019 | 0.004 | 0.007 |
X[8] | 0.005 | 0.012 | 0.016 | 0.019 | 0.005 | 0.011 | 0.019 | 0.034 | 0.008 | 0.009 |
X[9] | 0.011 | 0.031 | 0.016 | 0.029 | 0.032 | 0.004 | 0.004 | 0.008 | 0.146 | 0.033 |
X[10] | 0.015 | 0.030 | 0.051 | 0.025 | 0.031 | 0.015 | 0.007 | 0.009 | 0.033 | 0.117 |
# correlation matrix
psych::polychoric(mydata[,colnames(mydata) %like% "X"])
Warning in cor.smooth(mat): Matrix was not positive definite, smoothing was done
Call: psych::polychoric(x = mydata[, colnames(mydata) %like% "X"])
Polychoric correlations
X[1] X[2] X[3] X[4] X[5] X[6] X[7] X[8] X[9] X[10]
X[1] 1.00
X[2] -0.01 1.00
X[3] 0.45 0.24 1.00
X[4] 0.50 0.11 0.70 1.00
X[5] 0.00 0.46 0.26 0.23 1.00
X[6] 0.04 0.15 0.50 0.21 0.06 1.00
X[7] 0.32 0.05 0.52 0.58 0.36 0.32 1.00
X[8] 0.18 0.38 0.57 0.71 0.17 0.48 0.78 1.00
X[9] 0.12 0.29 0.24 0.55 0.31 0.08 0.13 0.31 1.00
X[10] 0.19 0.34 0.69 0.54 0.35 0.32 0.22 0.39 0.47 1.00
with tau of
1
X[1] -0.642
X[2] -0.088
X[3] -1.046
X[4] -1.422
X[5] -0.106
X[6] -1.290
X[7] -1.586
X[8] -1.809
X[9] -0.930
X[10] -1.109
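If the polychoric correlations or thresholds are needed later (for example, as starting values), they can be pulled out of the returned object; a brief sketch:

pc <- psych::polychoric(mydata[, colnames(mydata) %like% "X"])
pc$rho  # (smoothed) polychoric correlation matrix
pc$tau  # estimated thresholds, one per binary item

Note that these rounded thresholds reappear as the initial values for tau in jags.inits below.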
cat(read_file(paste0(w.d, "/code/study_4/model_2.txt")))
model {
  ### Model
  for(p in 1:N){
    for(i in 1:nit){
      # data model
      y[p,i] ~ dbern(pi[p,i,2])
      # LRV
      ystar[p,i] ~ dnorm(lambda[i]*eta[p], 1)
      # Pr(nu = 2)
      pi[p,i,2] = phi(ystar[p,i] - tau[i,1])
      # Pr(nu = 1)
      pi[p,i,1] = 1 - phi(ystar[p,i] - tau[i,1])
      # log-RT model
      dev[p,i] <- lambda[i]*(eta[p] - tau[i,1])
      mu.lrt[p,i] <- icept[i] - speed[p] - rho * abs(dev[p,i])
      lrt[p,i] ~ dnorm(mu.lrt[p,i], prec[i])
    }
  }
  ### Priors
  # person parameters
  for(p in 1:N){
    eta[p] ~ dnorm(0, 1)                       # latent ability
    speed[p] ~ dnorm(sigma.ts*eta[p], prec.s)  # latent speed
  }
  sigma.ts ~ dnorm(0, 0.1)
  prec.s ~ dgamma(.1, .1)
  # transformations
  sigma.t = pow(prec.s, -1) + pow(sigma.ts, 2)  # speed variance
  cor.ts = sigma.ts/(pow(sigma.t, 0.5))         # LV correlation
  for(i in 1:nit){
    # lrt parameters
    icept[i] ~ dnorm(0, .1)
    prec[i] ~ dgamma(.1, .1)
    # Thresholds
    tau[i, 1] ~ dnorm(0.0, 0.1)
    # loadings
    lambda[i] ~ dnorm(0, .44)T(0,)
    # LRV total variance
    # total variance = residual variance + fact. var.
    theta[i] = 1 + pow(lambda[i], 2)
    # standardized loading
    lambda.std[i] = lambda[i]/pow(theta[i], 0.5)
  }
  rho ~ dnorm(0, .1)I(0,)
  # compute omega
  lambda_sum[1] = lambda[1]
  for(i in 2:nit){
    # lambda_sum (sum factor loadings)
    lambda_sum[i] = lambda_sum[i-1] + lambda[i]
  }
  reli.omega = (pow(lambda_sum[nit], 2))/(pow(lambda_sum[nit], 2) + nit)
}
This model is similar to the BL-IRT model for jointly modeling item responses and response times (Molenaar et al., 2015).
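In equation form, with the notation chosen here of $\beta_{0i}$ for icept[i], $\zeta_p$ for speed[p], $\sigma^2_{\varepsilon i} = 1/\text{prec}[i]$, $\sigma^2_{\zeta} = 1/\text{prec.s}$, and $\sigma_{t\eta}$ for sigma.ts, the joint model coded above is, for person $p$ and item $i$,

$$
\begin{aligned}
y_{pi} &\sim \mathrm{Bernoulli}\big(\Phi(y^{*}_{pi} - \tau_i)\big), &
y^{*}_{pi} &\sim \mathrm{N}(\lambda_i \eta_p,\ 1),\\
\mathrm{lrt}_{pi} &\sim \mathrm{N}\big(\beta_{0i} - \zeta_p - \rho\,\lvert \lambda_i(\eta_p - \tau_i)\rvert,\ \sigma^{2}_{\varepsilon i}\big), &
\zeta_p &\sim \mathrm{N}(\sigma_{t\eta}\,\eta_p,\ \sigma^{2}_{\zeta}),
\end{aligned}
$$

and the reliability index computed in the last block of the model is

$$
\omega = \frac{\big(\sum_{i=1}^{I}\lambda_i\big)^2}{\big(\sum_{i=1}^{I}\lambda_i\big)^2 + I},
$$

where $I = \text{nit} = 10$; the denominator uses $I$ because each latent response variable has its residual variance fixed at 1.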
# Save parameters
jags.params <- c("tau",
"lambda","lambda.std",
"theta",
"icept",
"prec",
"prec.s",
"sigma.ts",
"rho",
"reli.omega")
# initial-values
jags.inits <- function(){
list(
"tau"=matrix(c(-0.64, -0.09, -1.05, -1.42, -0.11, -1.29, -1.59, -1.81, -0.93, -1.11), ncol=1, nrow=10),
"lambda"=rep(0.7,10),
"eta"=rnorm(142),
"speed"=rnorm(142),
"ystar"=matrix(c(0.7*rep(rnorm(142),10)), ncol=10),
"rho"=0.1,
"icept"=rep(0, 10),
"prec.s"=10,
"prec"=rep(4, 10),
"sigma.ts"=0.1
)
}
jags.data <- list(
y = mydata[,1:10],
lrt = mydata[,11:20],
N = nrow(mydata),
nit = 10
)
# Run model
# Model 2
model.fit <- R2jags::jags(
model = paste0(w.d, "/code/study_4/model_2.txt"),
parameters.to.save = jags.params,
inits = jags.inits,
data = jags.data,
n.chains = 4,
n.burnin = 5000,
n.iter = 10000
)
module glm loaded
Compiling model graph
Resolving undeclared variables
Allocating nodes
Graph information:
Observed stochastic nodes: 2840
Unobserved stochastic nodes: 1747
Total graph size: 18995
Initializing model
print(model.fit, width=1000)
Inference for Bugs model at "C:/Users/noahp/Documents/GitHub/Padgett-Dissertation/code/study_4/model_2.txt", fit using jags,
4 chains, each with 10000 iterations (first 5000 discarded), n.thin = 5
n.sims = 4000 iterations saved
mu.vect sd.vect 2.5% 25% 50% 75% 97.5% Rhat n.eff
icept[1] 1.555 0.080 1.402 1.501 1.553 1.606 1.721 1.00 520
icept[2] 1.403 0.057 1.290 1.365 1.403 1.441 1.515 1.00 620
icept[3] 1.539 0.160 1.270 1.427 1.524 1.637 1.897 1.01 360
icept[4] 1.641 0.312 1.144 1.413 1.602 1.831 2.332 1.03 99
icept[5] 1.357 0.064 1.235 1.312 1.355 1.398 1.483 1.01 340
icept[6] 1.428 0.105 1.256 1.355 1.416 1.490 1.673 1.01 330
icept[7] 1.506 0.175 1.221 1.380 1.489 1.610 1.902 1.01 1200
icept[8] 1.571 0.242 1.196 1.394 1.534 1.718 2.120 1.02 160
icept[9] 1.864 0.089 1.708 1.805 1.860 1.920 2.051 1.00 830
icept[10] 1.452 0.166 1.161 1.335 1.446 1.556 1.807 1.02 130
lambda[1] 0.517 0.228 0.104 0.358 0.502 0.662 1.006 1.00 1100
lambda[2] 0.568 0.240 0.139 0.398 0.556 0.728 1.064 1.00 2200
lambda[3] 1.706 0.406 1.022 1.423 1.665 1.936 2.670 1.01 230
lambda[4] 1.739 0.485 0.876 1.415 1.703 2.023 2.855 1.02 150
lambda[5] 0.619 0.245 0.185 0.449 0.603 0.770 1.144 1.02 350
lambda[6] 0.742 0.287 0.233 0.549 0.722 0.916 1.353 1.00 3300
lambda[7] 1.120 0.335 0.507 0.895 1.099 1.328 1.845 1.01 230
lambda[8] 1.275 0.379 0.610 1.008 1.234 1.519 2.068 1.01 390
lambda[9] 0.779 0.255 0.294 0.611 0.771 0.948 1.313 1.01 540
lambda[10] 1.734 0.357 1.097 1.477 1.711 1.967 2.491 1.01 520
lambda.std[1] 0.439 0.155 0.103 0.337 0.449 0.552 0.709 1.00 1200
lambda.std[2] 0.471 0.155 0.137 0.370 0.486 0.588 0.729 1.00 2700
lambda.std[3] 0.849 0.057 0.715 0.818 0.857 0.888 0.936 1.01 270
lambda.std[4] 0.848 0.071 0.659 0.817 0.862 0.896 0.944 1.01 230
lambda.std[5] 0.503 0.148 0.182 0.410 0.516 0.610 0.753 1.02 440
lambda.std[6] 0.568 0.149 0.227 0.481 0.585 0.676 0.804 1.00 2800
lambda.std[7] 0.721 0.110 0.452 0.667 0.740 0.799 0.879 1.01 450
lambda.std[8] 0.762 0.098 0.521 0.710 0.777 0.835 0.900 1.02 330
lambda.std[9] 0.592 0.130 0.282 0.521 0.610 0.688 0.795 1.01 690
lambda.std[10] 0.856 0.049 0.739 0.828 0.863 0.891 0.928 1.01 650
prec[1] 1.772 0.219 1.385 1.616 1.764 1.912 2.226 1.00 4000
prec[2] 3.690 0.493 2.805 3.344 3.665 4.005 4.714 1.00 1000
prec[3] 4.164 0.555 3.169 3.776 4.135 4.519 5.311 1.00 2200
prec[4] 2.544 0.326 1.962 2.311 2.525 2.761 3.214 1.00 1700
prec[5] 2.858 0.373 2.185 2.601 2.842 3.100 3.642 1.00 3200
prec[6] 3.033 0.390 2.328 2.761 3.017 3.284 3.840 1.00 2500
prec[7] 5.000 0.681 3.766 4.522 4.969 5.434 6.426 1.00 3800
prec[8] 3.914 0.514 2.972 3.553 3.888 4.245 5.015 1.00 630
prec[9] 2.613 0.337 2.004 2.372 2.593 2.838 3.293 1.00 4000
prec[10] 6.814 1.014 5.037 6.098 6.748 7.450 9.010 1.00 4000
prec.s 9.691 1.605 6.931 8.580 9.560 10.654 13.271 1.00 760
reli.omega 0.918 0.018 0.877 0.908 0.920 0.931 0.949 1.01 200
rho 0.112 0.033 0.056 0.087 0.110 0.133 0.185 1.04 75
sigma.ts 0.003 0.056 -0.110 -0.034 0.004 0.042 0.111 1.02 120
tau[1,1] -0.948 0.174 -1.298 -1.064 -0.944 -0.827 -0.620 1.00 3700
tau[2,1] -0.095 0.156 -0.410 -0.198 -0.094 0.007 0.216 1.00 3200
tau[3,1] -2.238 0.399 -3.145 -2.463 -2.199 -1.962 -1.559 1.01 340
tau[4,1] -3.179 0.593 -4.580 -3.519 -3.115 -2.767 -2.185 1.03 160
tau[5,1] -0.185 0.162 -0.500 -0.295 -0.184 -0.075 0.126 1.00 1500
tau[6,1] -2.071 0.277 -2.642 -2.244 -2.055 -1.887 -1.587 1.00 3600
tau[7,1] -2.845 0.412 -3.771 -3.106 -2.812 -2.554 -2.129 1.02 150
tau[8,1] -3.484 0.559 -4.749 -3.815 -3.426 -3.088 -2.569 1.01 540
tau[9,1] -1.463 0.218 -1.913 -1.604 -1.452 -1.312 -1.063 1.00 660
tau[10,1] -2.448 0.372 -3.246 -2.689 -2.422 -2.178 -1.796 1.00 1100
theta[1] 1.320 0.267 1.011 1.128 1.252 1.439 2.012 1.00 1000
theta[2] 1.381 0.302 1.019 1.159 1.309 1.529 2.131 1.00 1100
theta[3] 4.075 1.505 2.044 3.025 3.773 4.747 8.128 1.01 230
theta[4] 4.260 1.862 1.768 3.001 3.899 5.093 9.149 1.03 130
theta[5] 1.443 0.340 1.034 1.202 1.363 1.593 2.308 1.02 210
theta[6] 1.633 0.488 1.054 1.301 1.521 1.840 2.832 1.00 4000
theta[7] 2.365 0.799 1.257 1.801 2.207 2.764 4.404 1.02 150
theta[8] 2.768 1.044 1.372 2.016 2.522 3.306 5.278 1.01 400
theta[9] 1.672 0.423 1.086 1.373 1.594 1.898 2.723 1.01 380
theta[10] 4.134 1.295 2.203 3.183 3.928 4.868 7.204 1.01 480
deviance 2998.634 35.415 2931.106 2974.014 2998.348 3022.400 3069.817 1.01 470
For each parameter, n.eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor (at convergence, Rhat=1).
DIC info (using the rule, pD = var(deviance)/2)
pD = 623.6 and DIC = 3622.2
DIC is an estimate of expected predictive error (lower deviance is better).
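Written out, the rule quoted above is

$$
p_D = \tfrac{1}{2}\,\widehat{\operatorname{Var}}\big(D(\theta)\big), \qquad \mathrm{DIC} = \bar{D} + p_D,
$$

where $D(\theta)$ is the deviance and $\bar{D}$ its posterior mean; with the values in the table, $2998.6 + 623.6 \approx 3622.2$, matching the reported DIC.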
jags.mcmc <- as.mcmc(model.fit)
a <- colnames(as.data.frame(jags.mcmc[[1]]))
fit.mcmc <- data.frame(as.matrix(jags.mcmc, chains = T, iters = T))
colnames(fit.mcmc) <- c("chain", "iter", a)
fit.mcmc.ggs <- ggmcmc::ggs(jags.mcmc) # for GRB plot
# save posterior draws for later
write.csv(x=fit.mcmc, file=paste0(getwd(),"/data/study_4/posterior_draws_m2.csv"))
# tau
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "tau", prob = 0.8); ggsave("fig/study4_model2_tau_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "tau"); ggsave("fig/study4_model2_tau_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "tau"); ggsave("fig/study4_model2_tau_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "tau"); ggsave("fig/study4_model2_tau_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "lambda", prob = 0.8)
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "lambda")
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "lambda")
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "lambda")
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "lambda.std", prob = 0.8); ggsave("fig/study4_model2_lambda_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "lambda.std"); ggsave("fig/study4_model2_lambda_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "lambda.std"); ggsave("fig/study4_model2_lambda_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "lambda.std"); ggsave("fig/study4_model2_lambda_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "theta", prob = 0.8); ggsave("fig/study4_model2_theta_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "theta"); ggsave("fig/study4_model2_theta_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "theta"); ggsave("fig/study4_model2_theta_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "theta"); ggsave("fig/study4_model2_theta_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "icept", prob = 0.8); ggsave("fig/study4_model2_icept_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "icept"); ggsave("fig/study4_model2_icept_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "icept"); ggsave("fig/study4_model2_icept_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "icept"); ggsave("fig/study4_model2_icept_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc,pars = paste0("prec[",1:5,"]"), prob = 0.8); ggsave("fig/study4_model2_prec_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, pars = paste0("prec[",1:5,"]")); ggsave("fig/study4_model2_prec_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, pars = paste0("prec[",1:5,"]")); ggsave("fig/study4_model2_prec_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "prec"); ggsave("fig/study4_model2_prec_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "prec.s", prob = 0.8); ggsave("fig/study4_model2_precs_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "prec.s"); ggsave("fig/study4_model2_precs_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "prec.s"); ggsave("fig/study4_model2_precs_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "prec.s"); ggsave("fig/study4_model2_precs_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "sigma.ts", prob = 0.8); ggsave("fig/study4_model2_sigmats_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "sigma.ts"); ggsave("fig/study4_model2_sigmats_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "sigma.ts"); ggsave("fig/study4_model2_sigmats_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "sigma.ts"); ggsave("fig/study4_model2_sigmats_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "rho", prob = 0.8); ggsave("fig/study4_model2_rho_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "rho"); ggsave("fig/study4_model2_rho_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "rho"); ggsave("fig/study4_model2_rho_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "rho"); ggsave("fig/study4_model2_rho_grb.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_areas(fit.mcmc, regex_pars = "reli.omega", prob = 0.8); ggsave("fig/study4_model2_omega_dens.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_acf(fit.mcmc, regex_pars = "reli.omega"); ggsave("fig/study4_model2_omega_acf.pdf")
Saving 7 x 5 in image
bayesplot::mcmc_trace(fit.mcmc, regex_pars = "reli.omega"); ggsave("fig/study4_model2_omega_trace.pdf")
Saving 7 x 5 in image
ggmcmc::ggs_grb(fit.mcmc.ggs, family = "reli.omega"); ggsave("fig/study4_model2_omega_grb.pdf")
Saving 7 x 5 in image
# extract omega posterior for results comparison
extracted_omega <- data.frame(model_2 = fit.mcmc$reli.omega)
write.csv(x=extracted_omega, file=paste0(getwd(),"/data/study_4/extracted_omega_m2.csv"))
# Posterior Predictive Check
Niter <- 200
model.fit$model$recompile()
Compiling model graph
Resolving undeclared variables
Allocating nodes
Graph information:
Observed stochastic nodes: 2840
Unobserved stochastic nodes: 1747
Total graph size: 18995
Initializing model
fit.extra <- rjags::jags.samples(model.fit$model, variable.names = "pi", n.iter = Niter)
NOTE: Stopping adaptation
N <- 142
nit <- 10
nchain <- 4
C <- 2
n <- i <- iter <- ppc.row.i <- 1
y.prob.ppc <- array(dim=c(Niter*nchain, nit, C))
for(chain in 1:nchain){
  for(iter in 1:Niter){
    # initialize simulated y for this iteration
    y <- matrix(nrow = N, ncol = nit)
    # loop over items
    for(i in 1:nit){
      # simulate data for item i for each person
      for(n in 1:N){
        y[n,i] <- sample(1:C, 1, prob = fit.extra$pi[n, i, 1:C, iter, chain])
      }
      # compute proportion of each response category
      for(c in 1:C){
        y.prob.ppc[ppc.row.i, i, c] <- sum(y[,i] == c)/N
      }
    }
    # update row of output
    ppc.row.i <- ppc.row.i + 1
  }
}
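Each simulated data set is reduced to its item-by-category proportions, which the boxplots below compare against the observed proportions; for replication $s$, item $i$, and category $c$,

$$
\hat{p}^{(s)}_{ic} = \frac{1}{N}\sum_{n=1}^{N} \mathbf{1}\big(y^{(s)}_{ni} = c\big),
\qquad
\hat{p}^{\mathrm{obs}}_{ic} = \frac{1}{N}\sum_{n=1}^{N} \mathbf{1}\big(y_{ni} = c\big),
$$

with $N = 142$ persons and $\text{Niter} \times \text{nchain} = 800$ replications in total.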
yppcmat <- matrix(c(y.prob.ppc), ncol=1)
z <- expand.grid(1:(Niter*nchain), 1:nit, 1:C)
yppcmat <- data.frame( iter = z[,1], nit=z[,2], C=z[,3], yppc = yppcmat)
ymat <- model.fit$model$data()[[4]]
y.prob <- matrix(ncol=C, nrow=nit)
for(i in 1:nit){
  for(c in 1:C){
    y.prob[i,c] <- sum(ymat[,i] == c-1)/N
  }
}
yobsmat <- matrix(c(y.prob), ncol=1)
z <- expand.grid(1:nit, 1:C)
yobsmat <- data.frame(nit=z[,1], C=z[,2], yobs = yobsmat)
plot.ppc <- full_join(yppcmat, yobsmat)
Joining, by = c("nit", "C")
p <- plot.ppc %>%
  mutate(C = as.factor(C),
         item = nit) %>%
  ggplot() +
  geom_boxplot(aes(x = C, y = yppc), outlier.colour = NA) +
  geom_point(aes(x = C, y = yobs), color = "red") +
  lims(y = c(0, 1)) +
  labs(y = "Posterior Predictive Category Proportion", x = "Item Category") +
  facet_wrap(. ~ nit, nrow = 1) +
  theme_bw() +
  theme(
    panel.grid = element_blank(),
    strip.background = element_rect(fill = "white")
  )
p
ggsave(filename = "fig/study4_model2_ppc_y.pdf",plot=p,width = 6, height=3,units="in")
ggsave(filename = "fig/study4_model2_ppc_y.png",plot=p,width = 6, height=3,units="in")
ggsave(filename = "fig/study4_model2_ppc_y.eps",plot=p,width = 6, height=3,units="in")
# print to xtable
print(
xtable(
model.fit$BUGSoutput$summary,
caption = c("study4 Model 2 posterior distribution summary")
,align = "lrrrrrrrrr"
),
include.rownames=T,
booktabs=T
)
% latex table generated in R 4.0.5 by xtable 1.8-4 package
% Thu Jan 20 14:05:11 2022
\begin{table}[ht]
\centering
\begin{tabular}{lrrrrrrrrr}
\toprule
& mean & sd & 2.5\% & 25\% & 50\% & 75\% & 97.5\% & Rhat & n.eff \\
\midrule
deviance & 2998.63 & 35.42 & 2931.11 & 2974.01 & 2998.35 & 3022.40 & 3069.82 & 1.01 & 470.00 \\
icept[1] & 1.56 & 0.08 & 1.40 & 1.50 & 1.55 & 1.61 & 1.72 & 1.01 & 520.00 \\
icept[2] & 1.40 & 0.06 & 1.29 & 1.37 & 1.40 & 1.44 & 1.51 & 1.00 & 620.00 \\
icept[3] & 1.54 & 0.16 & 1.27 & 1.43 & 1.52 & 1.64 & 1.90 & 1.01 & 360.00 \\
icept[4] & 1.64 & 0.31 & 1.14 & 1.41 & 1.60 & 1.83 & 2.33 & 1.03 & 99.00 \\
icept[5] & 1.36 & 0.06 & 1.24 & 1.31 & 1.35 & 1.40 & 1.48 & 1.01 & 340.00 \\
icept[6] & 1.43 & 0.10 & 1.26 & 1.35 & 1.42 & 1.49 & 1.67 & 1.01 & 330.00 \\
icept[7] & 1.51 & 0.17 & 1.22 & 1.38 & 1.49 & 1.61 & 1.90 & 1.01 & 1200.00 \\
icept[8] & 1.57 & 0.24 & 1.20 & 1.39 & 1.53 & 1.72 & 2.12 & 1.02 & 160.00 \\
icept[9] & 1.86 & 0.09 & 1.71 & 1.80 & 1.86 & 1.92 & 2.05 & 1.00 & 830.00 \\
icept[10] & 1.45 & 0.17 & 1.16 & 1.33 & 1.45 & 1.56 & 1.81 & 1.02 & 130.00 \\
lambda[1] & 0.52 & 0.23 & 0.10 & 0.36 & 0.50 & 0.66 & 1.01 & 1.00 & 1100.00 \\
lambda[2] & 0.57 & 0.24 & 0.14 & 0.40 & 0.56 & 0.73 & 1.06 & 1.00 & 2200.00 \\
lambda[3] & 1.71 & 0.41 & 1.02 & 1.42 & 1.67 & 1.94 & 2.67 & 1.01 & 230.00 \\
lambda[4] & 1.74 & 0.49 & 0.88 & 1.41 & 1.70 & 2.02 & 2.85 & 1.02 & 150.00 \\
lambda[5] & 0.62 & 0.25 & 0.19 & 0.45 & 0.60 & 0.77 & 1.14 & 1.02 & 350.00 \\
lambda[6] & 0.74 & 0.29 & 0.23 & 0.55 & 0.72 & 0.92 & 1.35 & 1.00 & 3300.00 \\
lambda[7] & 1.12 & 0.33 & 0.51 & 0.90 & 1.10 & 1.33 & 1.85 & 1.01 & 230.00 \\
lambda[8] & 1.27 & 0.38 & 0.61 & 1.01 & 1.23 & 1.52 & 2.07 & 1.02 & 390.00 \\
lambda[9] & 0.78 & 0.26 & 0.29 & 0.61 & 0.77 & 0.95 & 1.31 & 1.01 & 540.00 \\
lambda[10] & 1.73 & 0.36 & 1.10 & 1.48 & 1.71 & 1.97 & 2.49 & 1.01 & 520.00 \\
lambda.std[1] & 0.44 & 0.15 & 0.10 & 0.34 & 0.45 & 0.55 & 0.71 & 1.00 & 1200.00 \\
lambda.std[2] & 0.47 & 0.15 & 0.14 & 0.37 & 0.49 & 0.59 & 0.73 & 1.00 & 2700.00 \\
lambda.std[3] & 0.85 & 0.06 & 0.71 & 0.82 & 0.86 & 0.89 & 0.94 & 1.01 & 270.00 \\
lambda.std[4] & 0.85 & 0.07 & 0.66 & 0.82 & 0.86 & 0.90 & 0.94 & 1.01 & 230.00 \\
lambda.std[5] & 0.50 & 0.15 & 0.18 & 0.41 & 0.52 & 0.61 & 0.75 & 1.02 & 440.00 \\
lambda.std[6] & 0.57 & 0.15 & 0.23 & 0.48 & 0.59 & 0.68 & 0.80 & 1.00 & 2800.00 \\
lambda.std[7] & 0.72 & 0.11 & 0.45 & 0.67 & 0.74 & 0.80 & 0.88 & 1.01 & 450.00 \\
lambda.std[8] & 0.76 & 0.10 & 0.52 & 0.71 & 0.78 & 0.84 & 0.90 & 1.02 & 330.00 \\
lambda.std[9] & 0.59 & 0.13 & 0.28 & 0.52 & 0.61 & 0.69 & 0.80 & 1.01 & 690.00 \\
lambda.std[10] & 0.86 & 0.05 & 0.74 & 0.83 & 0.86 & 0.89 & 0.93 & 1.01 & 650.00 \\
prec[1] & 1.77 & 0.22 & 1.39 & 1.62 & 1.76 & 1.91 & 2.23 & 1.00 & 4000.00 \\
prec[2] & 3.69 & 0.49 & 2.80 & 3.34 & 3.67 & 4.00 & 4.71 & 1.00 & 1000.00 \\
prec[3] & 4.16 & 0.56 & 3.17 & 3.78 & 4.13 & 4.52 & 5.31 & 1.00 & 2200.00 \\
prec[4] & 2.54 & 0.33 & 1.96 & 2.31 & 2.52 & 2.76 & 3.21 & 1.00 & 1700.00 \\
prec[5] & 2.86 & 0.37 & 2.18 & 2.60 & 2.84 & 3.10 & 3.64 & 1.00 & 3200.00 \\
prec[6] & 3.03 & 0.39 & 2.33 & 2.76 & 3.02 & 3.28 & 3.84 & 1.00 & 2500.00 \\
prec[7] & 5.00 & 0.68 & 3.77 & 4.52 & 4.97 & 5.43 & 6.43 & 1.00 & 3800.00 \\
prec[8] & 3.91 & 0.51 & 2.97 & 3.55 & 3.89 & 4.24 & 5.01 & 1.00 & 630.00 \\
prec[9] & 2.61 & 0.34 & 2.00 & 2.37 & 2.59 & 2.84 & 3.29 & 1.00 & 4000.00 \\
prec[10] & 6.81 & 1.01 & 5.04 & 6.10 & 6.75 & 7.45 & 9.01 & 1.00 & 4000.00 \\
prec.s & 9.69 & 1.61 & 6.93 & 8.58 & 9.56 & 10.65 & 13.27 & 1.00 & 760.00 \\
reli.omega & 0.92 & 0.02 & 0.88 & 0.91 & 0.92 & 0.93 & 0.95 & 1.01 & 200.00 \\
rho & 0.11 & 0.03 & 0.06 & 0.09 & 0.11 & 0.13 & 0.18 & 1.04 & 75.00 \\
sigma.ts & 0.00 & 0.06 & -0.11 & -0.03 & 0.00 & 0.04 & 0.11 & 1.02 & 120.00 \\
tau[1,1] & -0.95 & 0.17 & -1.30 & -1.06 & -0.94 & -0.83 & -0.62 & 1.00 & 3700.00 \\
tau[2,1] & -0.10 & 0.16 & -0.41 & -0.20 & -0.09 & 0.01 & 0.22 & 1.00 & 3200.00 \\
tau[3,1] & -2.24 & 0.40 & -3.15 & -2.46 & -2.20 & -1.96 & -1.56 & 1.01 & 340.00 \\
tau[4,1] & -3.18 & 0.59 & -4.58 & -3.52 & -3.11 & -2.77 & -2.19 & 1.03 & 160.00 \\
tau[5,1] & -0.18 & 0.16 & -0.50 & -0.29 & -0.18 & -0.07 & 0.13 & 1.00 & 1500.00 \\
tau[6,1] & -2.07 & 0.28 & -2.64 & -2.24 & -2.05 & -1.89 & -1.59 & 1.00 & 3600.00 \\
tau[7,1] & -2.85 & 0.41 & -3.77 & -3.11 & -2.81 & -2.55 & -2.13 & 1.02 & 150.00 \\
tau[8,1] & -3.48 & 0.56 & -4.75 & -3.82 & -3.43 & -3.09 & -2.57 & 1.01 & 540.00 \\
tau[9,1] & -1.46 & 0.22 & -1.91 & -1.60 & -1.45 & -1.31 & -1.06 & 1.00 & 660.00 \\
tau[10,1] & -2.45 & 0.37 & -3.25 & -2.69 & -2.42 & -2.18 & -1.80 & 1.01 & 1100.00 \\
theta[1] & 1.32 & 0.27 & 1.01 & 1.13 & 1.25 & 1.44 & 2.01 & 1.00 & 1000.00 \\
theta[2] & 1.38 & 0.30 & 1.02 & 1.16 & 1.31 & 1.53 & 2.13 & 1.00 & 1100.00 \\
theta[3] & 4.07 & 1.50 & 2.04 & 3.02 & 3.77 & 4.75 & 8.13 & 1.01 & 230.00 \\
theta[4] & 4.26 & 1.86 & 1.77 & 3.00 & 3.90 & 5.09 & 9.15 & 1.03 & 130.00 \\
theta[5] & 1.44 & 0.34 & 1.03 & 1.20 & 1.36 & 1.59 & 2.31 & 1.02 & 210.00 \\
theta[6] & 1.63 & 0.49 & 1.05 & 1.30 & 1.52 & 1.84 & 2.83 & 1.00 & 4000.00 \\
theta[7] & 2.37 & 0.80 & 1.26 & 1.80 & 2.21 & 2.76 & 4.40 & 1.02 & 150.00 \\
theta[8] & 2.77 & 1.04 & 1.37 & 2.02 & 2.52 & 3.31 & 5.28 & 1.01 & 400.00 \\
theta[9] & 1.67 & 0.42 & 1.09 & 1.37 & 1.59 & 1.90 & 2.72 & 1.01 & 380.00 \\
theta[10] & 4.13 & 1.29 & 2.20 & 3.18 & 3.93 & 4.87 & 7.20 & 1.01 & 480.00 \\
\bottomrule
\end{tabular}
\caption{study4 Model 2 posterior distribution summary}
\end{table}
sessionInfo()
R version 4.0.5 (2021-03-31)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows 10 x64 (build 22000)
Matrix products: default
locale:
[1] LC_COLLATE=English_United States.1252
[2] LC_CTYPE=English_United States.1252
[3] LC_MONETARY=English_United States.1252
[4] LC_NUMERIC=C
[5] LC_TIME=English_United States.1252
attached base packages:
[1] stats graphics grDevices utils datasets methods base
other attached packages:
[1] car_3.0-10 carData_3.0-4 mvtnorm_1.1-1
[4] LaplacesDemon_16.1.4 runjags_2.2.0-2 lme4_1.1-26
[7] Matrix_1.3-2 sirt_3.9-4 R2jags_0.6-1
[10] rjags_4-12 eRm_1.0-2 diffIRT_1.5
[13] statmod_1.4.35 xtable_1.8-4 kableExtra_1.3.4
[16] lavaan_0.6-7 polycor_0.7-10 bayesplot_1.8.0
[19] ggmcmc_1.5.1.1 coda_0.19-4 data.table_1.14.0
[22] patchwork_1.1.1 forcats_0.5.1 stringr_1.4.0
[25] dplyr_1.0.5 purrr_0.3.4 readr_1.4.0
[28] tidyr_1.1.3 tibble_3.1.0 ggplot2_3.3.5
[31] tidyverse_1.3.0 workflowr_1.6.2
loaded via a namespace (and not attached):
[1] minqa_1.2.4 TAM_3.5-19 colorspace_2.0-0 rio_0.5.26
[5] ellipsis_0.3.1 ggridges_0.5.3 rprojroot_2.0.2 fs_1.5.0
[9] rstudioapi_0.13 farver_2.1.0 fansi_0.4.2 lubridate_1.7.10
[13] xml2_1.3.2 codetools_0.2-18 splines_4.0.5 mnormt_2.0.2
[17] knitr_1.31 jsonlite_1.7.2 nloptr_1.2.2.2 broom_0.7.5
[21] dbplyr_2.1.0 compiler_4.0.5 httr_1.4.2 backports_1.2.1
[25] assertthat_0.2.1 cli_2.3.1 later_1.1.0.1 htmltools_0.5.1.1
[29] tools_4.0.5 gtable_0.3.0 glue_1.4.2 reshape2_1.4.4
[33] Rcpp_1.0.7 cellranger_1.1.0 jquerylib_0.1.3 vctrs_0.3.6
[37] svglite_2.0.0 nlme_3.1-152 psych_2.0.12 xfun_0.21
[41] ps_1.6.0 openxlsx_4.2.3 rvest_1.0.0 lifecycle_1.0.0
[45] MASS_7.3-53.1 scales_1.1.1 ragg_1.1.1 hms_1.0.0
[49] promises_1.2.0.1 parallel_4.0.5 RColorBrewer_1.1-2 curl_4.3
[53] yaml_2.2.1 sass_0.3.1 reshape_0.8.8 stringi_1.5.3
[57] highr_0.8 zip_2.1.1 boot_1.3-27 rlang_0.4.10
[61] pkgconfig_2.0.3 systemfonts_1.0.1 evaluate_0.14 lattice_0.20-41
[65] labeling_0.4.2 tidyselect_1.1.0 GGally_2.1.1 plyr_1.8.6
[69] magrittr_2.0.1 R6_2.5.0 generics_0.1.0 DBI_1.1.1
[73] foreign_0.8-81 pillar_1.5.1 haven_2.3.1 withr_2.4.1
[77] abind_1.4-5 modelr_0.1.8 crayon_1.4.1 utf8_1.1.4
[81] tmvnsim_1.0-2 rmarkdown_2.7 grid_4.0.5 readxl_1.3.1
[85] CDM_7.5-15 pbivnorm_0.6.0 git2r_0.28.0 reprex_1.0.0
[89] digest_0.6.27 webshot_0.5.2 httpuv_1.5.5 textshaping_0.3.1
[93] stats4_4.0.5 munsell_0.5.0 viridisLite_0.3.0 bslib_0.2.4
[97] R2WinBUGS_2.1-21