Generate codings for FHAge.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script for generating codings for participants.
This handles the coding of participants in FHAge
as variables from the master sheet (run_sheet) and
their individual trial runs.
"""
__author__ = "Cooper Pellaton"
__copyright__ = "Copyright 2019, Georgia Tech, CABI"
__version__ = "0.0.1"
__maintainer__ = "Cooper Pellaton"
__email__ = "[email protected]"
__status__ = "Alpha"
import argparse
import csv
import os
import pathlib
import sys
# Globals
encodings = {}
within = {
    "VFFastCorr": 1,
    "VFFastIncorr": 2,
    "VFSlowCorr": 3,
    "VFSlowIncorr": 4,
    "IFFastCorr": 5,
    "IFFastIncorr": 6,
    "IFSlowCorr": 7,
    "IFSlowIncorr": 8,
    "VHFastCorr": 9,
    "VHFastIncorr": 10,
    "VHSlowCorr": 11,
    "VHSlowIncorr": 12,
    "IHFastCorr": 13,
    "IHFastIncorr": 14,
    "IHSlowCorr": 15,
    "IHSlowIncorr": 16,
    "NFFastCorr": 17,
    "NFFastIncorr": 18,
    "NFSlowCorr": 19,
    "NFSlowIncorr": 20,
    "NHFastCorr": 21,
    "NHFastIncorr": 22,
    "NHSlowCorr": 23,
    "NHSlowIncorr": 24,
    "VFNR": 25,
    "IFNR": 26,
    "VHNR": 27,
    "IHNR": 28,
    "NFNR": 29,
    "NHNR": 30,
    "F": 31,
    "H": 32,
    "N": 33,
}
types = {"face": "F", "house": "H", "neutral": "N"}
validities = {True: "V", False: "I"}
speed = {True: "Fast", False: "Slow"}
corr = {True: "Corr", False: "Incorr"}
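# Condition labels are assembled from the lookup tables above, e.g.
# "V" (valid cue) + "F" (face) + "Fast" + "Corr" -> "VFFastCorr", which is
# code 1 in `within`. "NR" marks no-response trials, and the bare letters
# F/H/N (codes 31-33) cover catch trials.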
# Set up and define the arg parser.
# This is so that we can take command line input.
parser = argparse.ArgumentParser(
    description="Utility for generating number encodings from participant files."
)
parser.add_argument(
    "master_file",
    metavar="N",
    type=str,
    help="The path to the master file containing the handedness/encodings of each person.",
)
parser.add_argument(
    "participant_file",
    metavar="P",
    type=str,
    help="The path to the participant file to be parsed.",
)
parser.add_argument(
    "threshold",
    metavar="T",
    type=float,
    help="The response time below which a response is considered fast, and at or above which it is considered slow.",
)
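# Example invocation (script and data file names here are hypothetical;
# substitute your own paths):
#
#   python generate_codings.py run_sheet.csv S01_trials.csv 0.6
#
# run_sheet.csv is the master sheet, S01_trials.csv is one participant's
# trial file, and 0.6 is the fast/slow threshold in seconds.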
def main():
    args = parser.parse_args()
    participant_path = args.participant_file
    # Participant id is taken from characters 1:3 of the file name
    # (e.g. a file named "S01..." yields id "01").
    participant_id = os.path.basename(participant_path)[1:3]
    fetch_participant_alignment(args.master_file, participant_id)
    threshold = args.threshold
    output = {}
    # Do a between trial.
    output["between"] = generate_codings(participant_path, threshold)
    write_to_disk(participant_id, output["between"])
    # Do a within trial.
    output["within"] = generate_codings(participant_path, threshold, False)
    write_to_disk(participant_id, output["within"], False)
    # We've finished now. Exit cleanly.
    sys.exit(0)
# We assume that the master sheet will be in CSV form.
def fetch_participant_alignment(master_sheet_path, p_id):
    """Determine what group the participant is in,
    i.e. whether face is the left hand or face is the right hand.
    Store this data in a dictionary for later use.
    """
    global encodings
    with open(master_sheet_path, newline="") as f:
        # DictReader consumes the unused header row at the top for us.
        rows = list(csv.DictReader(f))
    p_align = rows[int(p_id)]
    if p_align["Face hand"] == "LEFT":
        encodings = {1: "face", 6: "house"}
    else:
        encodings = {6: "face", 1: "house"}
def generate_codings(participant_path, threshold, between=True):
    """Generate the coding for each row.
    This generates the coding of the response to the associated types so
    that they can be stored in the output file and used later.
    """
    out_data = []
    with open(participant_path, newline="") as f:
        p_data = csv.DictReader(f)
        # Hardcode the initial start time.
        # This is 3 * TR + 3 sec. fixation.
        base_time = 3 * 1.5 + 3
        for row in p_data:
            # Some state we need to keep track of to make later
            # assignments for the labels. Reset it for every row.
            is_correct = False
            is_nr = False
            is_fast = False
            is_valid = False
            is_catch = False
            is_neutral = False
            valid = ""
            correct = ""
            catch_type = ""
            # Establish what the participant response *should* have been.
            if row["FaceHouseMov"] is not None and row["FaceHouseMov"] != "":
                if "face" in row["FaceHouseMov"]:
                    correct = "face"
                else:
                    correct = "house"
            if row["CueType"] == row["TrialType"]:
                is_valid = True
                valid = row["TrialType"]
            elif row["TrialType"] == "catch":
                is_catch = True
                catch_type = row["CueType"]
            else:
                valid = row["TrialType"]
            # Determine if the response was the right one, or valid.
            resp = row["key_resp_2.keys"]
            if resp is None or resp == "":
                is_nr = True
            elif encodings.get(int(resp)) == correct:
                is_correct = True
            # Determine if this response was Fast/Slow.
            if not is_nr and float(row["key_resp_2.rt"]) < threshold:
                is_fast = True
            # Determine if this was a neutral trial.
            if row["CueText"] == "50N":
                is_neutral = True
            # Now piece together the label from our states.
            if is_catch:
                # Catch trials are coded by the cued type alone (F/H/N).
                name = types[catch_type]
            else:
                if is_neutral:
                    name = "N" + types[valid]
                else:
                    name = validities[is_valid] + types[valid]
                if is_nr:
                    name += "NR"
                else:
                    name += speed[is_fast] + corr[is_correct]
            # Calculate timings for our entries.
            if between:
                base_time += float(row["JitterTime"])
                # Write data to the array.
                out_data.append([name, base_time])
                # Calculate the next time.
                base_time += 3 + float(row["FHDuration"])
            else:
                # Calculate the cue time.
                base_time += float(row["JitterTime"])
                out_data.append([name, base_time])
                # Calculate the trial time.
                base_time += 3
                out_data.append([name, base_time])
                # Calculate the next time.
                base_time += float(row["FHDuration"])
    return out_data
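# Each entry in out_data is a [label, onset time] pair, e.g. (with made-up
# numbers) ["VFFastCorr", 10.5]. The within-trial variant appends two
# entries per row: one at the cue onset and one at the trial onset.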
def write_to_disk(p_id, out_data, between=True):
    """Write the output data to disk.
    Make sure the output path exists before writing.
    Compensate for the possibility of between and within trials
    and handle the naming for these appropriately.
    """
    # Before writing output, make sure the directories exist.
    pathlib.Path("zzz_output/S%s" % (p_id)).mkdir(parents=True, exist_ok=True)
    if between:
        out_path = "zzz_output/S%s/S%s_between.txt" % (p_id, p_id)
    else:
        out_path = "zzz_output/S%s/S%s_within.txt" % (p_id, p_id)
    with open(out_path, "w") as f:
        for item in out_data:
            f.write("%s\n" % item)
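# Output lands in zzz_output/S<participant id>/, e.g. a hypothetical
# participant "01" produces S01/S01_between.txt and S01/S01_within.txt,
# with one "[label, time]" pair per line.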
if __name__ == "__main__":
    main()