# Response Time Reliability Simulations
#------------------------------------------------------------------------------
# generate data from a multivariate normal distribution with known
# correlations between values
mvrnorm <- function(n, nValues, meanValues, sdValues, corMatrix) {
  # generate uncorrelated standard normals, then impose the requested
  # correlation structure via the Cholesky factor of the correlation matrix
  Z <- matrix(rnorm(n * nValues), nValues, n)
  t(meanValues + sdValues * t(chol(corMatrix)) %*% Z)
}
#------------------------------------------------------------------------------
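# A quick sanity check of mvrnorm() (not part of the original script): with a
# large n, the empirical correlations of the generated values should be close
# to the requested corMatrix. Uncomment to run; the numbers here are
# illustrative only.
# testCor <- matrix(c(1.0, 0.5,
#                     0.5, 1.0), nrow = 2, byrow = TRUE)
# testDat <- mvrnorm(n = 1e5, nValues = 2, meanValues = c(0, 10),
#                    sdValues = c(1, 2), corMatrix = testCor)
# round(cor(testDat), 2)    # should be close to testCor
# round(colMeans(testDat))  # should be close to c(0, 10)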
#------------------------------------------------------------------------------
# get set of parameters for all subjects
getParameters <- function(nSubs, nParms, lbaMeans, lbaSD, varMatrix){
  # get the subject parameters
  parms <- mvrnorm(nSubs, nParms, lbaMeans, lbaSD, varMatrix)
  # if either drift rate is lower than the boundary value for any subject,
  # re-generate the parameters
  while(sum(pmin(parms[, 1], parms[, 2]) < parms[, 3]) > 0){
    parms <- mvrnorm(nSubs, nParms, lbaMeans, lbaSD, varMatrix)
  }
  # if there are negative parameter values, re-generate parameters until
  # no negatives are present
  while(min(parms) < 0){
    parms <- mvrnorm(nSubs, nParms, lbaMeans, lbaSD, varMatrix)
  }
  # truncate boundary (b) values to be above 0.05
  parms[, 3][parms[, 3] < 0.05] <- 0.05
  # truncate drift-rate variability (sigma) values to be above 0.01
  parms[, 5][parms[, 5] < 0.01] <- 0.01
  parms <- data.frame(parms)
  colnames(parms) <- c("driftCBA", "driftABA", "boundary", "nonDecision",
                       "variability")
  return(parms)
}
#------------------------------------------------------------------------------
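# Example usage of getParameters() (not part of the original script): generate
# parameters for a handful of subjects and inspect the drift-rate correlation.
# The means and SDs below re-use the values from the main script; the identity
# matrix is only a short placeholder for the full correlation matrix.
# Uncomment to run.
# exampleParms <- getParameters(nSubs = 10, nParms = 5,
#                               lbaMeans = c(1.05, 0.90, 0.28, 0.25, 0.35),
#                               lbaSD = c(0.2, 0.2, 0.1, 0.03, 0.07),
#                               varMatrix = diag(5))
# head(exampleParms)
# cor(exampleParms$driftCBA, exampleParms$driftABA)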
#------------------------------------------------------------------------------
# generate simulated data from the Linear Ballistic Accumulator Model
# http://bit.ly/1elK71H
simulateLBA <- function(parms, conditionNames, nTrials, nSubjects){
  # initialise the object to store the data in
  finalData <- NULL
  for(j in 1:nSubjects){
    # the parameters passed must be in the order v1, v2, b, ter, sdV. Note
    # that here A is fixed at a value halfway between the values reported by
    # van Ravenzwaaij & Oberauer (2009; see page 470).
    if(nSubjects > 1){
      v1 <- as.numeric(parms[j, 1])
      v2 <- as.numeric(parms[j, 2])
      b <- as.numeric(parms[j, 3])
      ter <- as.numeric(parms[j, 4])
      sdV <- as.numeric(parms[j, 5])
      A <- 0.7 * b
    } else {
      v1 <- as.numeric(parms[1])
      v2 <- as.numeric(parms[2])
      b <- as.numeric(parms[3])
      ter <- as.numeric(parms[4])
      sdV <- as.numeric(parms[5])
      A <- 0.7 * b
    }

    ### do first condition
    # data frame to store the subject data in
    conditionA <- data.frame(matrix(0, nrow = nTrials, ncol = 5))
    # add participant & condition information to the data
    conditionA[, 1] <- j
    conditionA[, 2] <- conditionNames[1]
    # perform each trial
    # NOTE: simulating each trial like this isn't efficient. It would be
    # better to simulate all trials at once using vectorisation (see the
    # commented sketch after this function).
    for(i in 1:nTrials){
      # add the trial number to the final column
      conditionA[i, 5] <- i
      # to protect against both accumulators being below zero, perform a
      # while loop. First, set both accumulators to negative...
      a1 <- -1
      a2 <- -1
      # ...and then simulate accumulation times until one is positive
      while(max(c(a1, a2)) <= 0){
        # simulate the accumulator finishing times
        a1 <- (b - runif(1, 0, A)) / rnorm(1, v1, sdV)
        a2 <- (b - runif(1, 0, A)) / rnorm(1, 1 - v1, sdV)
      }
      # if one of the accumulators is still negative, set it to a large value
      # so it will never be chosen first
      if(a1 < 0){a1 <- 1e6}; if(a2 < 0){a2 <- 1e6}
      # log the response time by adding the accumulation time to ter
      conditionA[i, 3] <- round((min(c(a1, a2)) + ter), digits = 3)
      # set the accuracy. If a1 finished first, it's a correct response;
      # otherwise, it's an error.
      if(a1 < a2){
        conditionA[i, 4] <- 1
      } else {
        conditionA[i, 4] <- 0
      }
    } # end of trials loop

    ### do second condition
    # data frame to store the subject data in
    conditionB <- data.frame(matrix(0, nrow = nTrials, ncol = 5))
    # add participant & condition information to the data
    conditionB[, 1] <- j
    conditionB[, 2] <- conditionNames[2]
    # perform each trial
    for(i in 1:nTrials){
      # add the trial number to the final column
      conditionB[i, 5] <- i
      # to protect against both accumulators being below zero, perform a
      # while loop. First, set both accumulators to negative...
      a1 <- -1
      a2 <- -1
      # ...and then simulate accumulation times until one is positive
      while(max(c(a1, a2)) <= 0){
        # simulate the accumulator finishing times
        a1 <- (b - runif(1, 0, A)) / rnorm(1, v2, sdV)
        a2 <- (b - runif(1, 0, A)) / rnorm(1, 1 - v2, sdV)
      }
      # if one of the accumulators is still negative, set it to a large value
      # so it will never be chosen first
      if(a1 < 0){a1 <- 1e6}; if(a2 < 0){a2 <- 1e6}
      # log the response time by adding the accumulation time to ter
      conditionB[i, 3] <- round((min(c(a1, a2)) + ter), digits = 3)
      # set the accuracy. If a1 finished first, it's a correct response;
      # otherwise, it's an error.
      if(a1 < a2){
        conditionB[i, 4] <- 1
      } else {
        conditionB[i, 4] <- 0
      }
    } # end of trials loop

    # add to the final data
    finalData <- rbind(finalData, conditionA, conditionB)
  } # end of subject loop

  # update the final data frame
  colnames(finalData) <- c("participant", "condition", "rt", "accuracy",
                           "trial")
  return(finalData)
} # end of function
#------------------------------------------------------------------------------
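# A vectorised sketch of the per-trial simulation above (not part of the
# original script): all trials of one condition are drawn in a single pass
# rather than in a loop. For brevity it skips the redraw used above when both
# accumulators finish with negative times, so it is an illustration rather
# than a drop-in replacement. Uncomment to run; the parameter values in the
# call are placeholders.
# simulateConditionVectorised <- function(nTrials, v, b, A, ter, sdV){
#   a1 <- (b - runif(nTrials, 0, A)) / rnorm(nTrials, v, sdV)
#   a2 <- (b - runif(nTrials, 0, A)) / rnorm(nTrials, 1 - v, sdV)
#   # negative finishing times are set to a large value so they never win
#   a1[a1 < 0] <- 1e6
#   a2[a2 < 0] <- 1e6
#   data.frame(rt = round(pmin(a1, a2) + ter, 3),
#              accuracy = as.numeric(a1 < a2))
# }
# head(simulateConditionVectorised(nTrials = 100, v = 1.05, b = 0.28,
#                                  A = 0.7 * 0.28, ter = 0.25, sdV = 0.35))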
rm(list = ls())
# http://www.donvanravenzwaaij.com/Papers_files/van%20Ravenzwaaij%20%26%20Oberauer,%202009.pdf
# http://mres.gmu.edu/pmwiki/uploads/Main/Schmiedek2007.pdf
# Put this file and the functions.R file in a folder somewhere on your
# computer, and then open this file from that location. Then, in RStudio,
# go to the menu Session - Set Working Directory - To Source File Location.
# Copy the "setwd" text that appears in your console, and replace what is below.
setwd("D:/Work/PhD Supervision/Simulations")
# load required functions & packages
source("functions.R")
# package for RT trimming; install and load it if it isn't already available
if(require(trimr) == FALSE){
  install.packages("trimr", dependencies = TRUE)
  library(trimr)
}
#------------------------------------------------------------------------------
### simulation parameters
# mean values of the LBA parameters to use for this simulation
lbaMeans <- c(1.05, # drift for "CBA" condition
              0.90, # drift for "ABA" condition
              0.28, # boundary height
              0.25, # non-decision time
              0.35) # variability in drift rate
# standard deviations for LBA parameters (same order as above)
lbaSD <- c(0.2, 0.2, 0.1, 0.03, 0.07)
# correlation matrix between all parameters (passed to mvrnorm to induce the
# correlations between subjects' parameter values)
varMatrix <- matrix(c(1.0, 0.8, 0.0, 0.0, 0.8,
                      0.8, 1.0, 0.0, 0.0, 0.8,
                      0.0, 0.0, 1.0, 0.0, 0.0,
                      0.0, 0.0, 0.0, 1.0, 0.0,
                      0.8, 0.8, 0.0, 0.0, 1.0),
                    nrow = 5, ncol = 5, byrow = TRUE)
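# this matrix must be positive definite for the Cholesky decomposition inside
# mvrnorm() to work; a quick check (not in the original script) is that all
# of its eigenvalues are positive:
# min(eigen(varMatrix)$values) > 0  # should return TRUE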
# how many subjects to simulate per experiment?
nSubs <- 72
# how many trials to simulate for each condition in each experiment?
nTrials <- c(20, 50, 100, 250, 500, 1000)
# how many experiments to simulate?
nSimulations <- 100
# matrix to store the correlation coefficient for each simulated experiment,
# for each number of trials
corData <- matrix(0, ncol = length(nTrials), nrow = nSimulations)
colnames(corData) <- nTrials
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### Simulation starts here
# declare a progress bar to track how many simulations we have conducted
pb <- txtProgressBar(min = 0, max = nSimulations, style = 3)
# loop over each simulated experiment
for(i in 1:nSimulations){
  # reset index for which "nTrials" value we are currently using
  j <- 1
  # get the parameter values for all subjects (use these same ones for each
  # nTrials simulation)
  parms <- getParameters(nSubs, nParms = 5, lbaMeans, lbaSD, varMatrix)
  # loop over each number of trials
  for(currTrials in nTrials){
    # get the LBA response times for the current trial-size simulation
    rts <- simulateLBA(parms, conditionNames = c("CBA", "ABA"),
                       nTrials = currTrials, nSubjects = nSubs)
    # get the even & odd numbered trials
    even <- rts[c(FALSE, TRUE), ]
    odd <- rts[c(TRUE, FALSE), ]
    # # check it worked
    # unique(odd$trial)
    # unique(even$trial)
    # get the condition means by passing the data to the trimr package with
    # deliberately wide trimming parameters, so no trials are actually removed
    even <- absoluteRT(even, minRT = 0, maxRT = 1e06, omitErrors = TRUE)
    odd <- absoluteRT(odd, minRT = 0, maxRT = 1e06, omitErrors = TRUE)
    # just check whether overall RT is even reliable
    biData <- data.frame(even$ABA, odd$ABA)
    biData <- biData[complete.cases(biData), ]
    # # calculate the n-2 repetition cost, move to a data frame, and
    # # only include complete cases
    # evenBI <- even$ABA - even$CBA
    # oddBI <- odd$ABA - odd$CBA
    # biData <- data.frame(evenBI, oddBI)
    # biData <- biData[complete.cases(biData), ]
    # correlate the two, and record into the data frame
    corData[i, j] <- round(cor(biData[, 1], biData[, 2]), 3)
    # update the progress bar
    setTxtProgressBar(pb, i)
    # update the index value
    j <- j + 1
  }
}
# apply the Spearman-Brown prediction formula
# sb = (2 * r) / (1 + r)
corData <- (2 * corData) / (1 + corData)
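# worked example of the correction (not in the original script): a split-half
# correlation of r = 0.5 becomes (2 * 0.5) / (1 + 0.5) = 0.667 for the
# full-length test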
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
### now do some plotting
boxplot(corData)
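# an optional labelled version of the same plot (the axis labels below are
# illustrative wording, not taken from the original script); uncomment to run
# boxplot(corData,
#         xlab = "Number of trials per condition",
#         ylab = "Split-half reliability (Spearman-Brown corrected)")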
#------------------------------------------------------------------------------