Skip to content

Instantly share code, notes, and snippets.

@deconstructionalism
Created April 10, 2019 19:29
Show Gist options
  • Select an option

  • Save deconstructionalism/75233ad9c4372cb99d280edd66f8b2e5 to your computer and use it in GitHub Desktop.

Select an option

Save deconstructionalism/75233ad9c4372cb99d280edd66f8b2e5 to your computer and use it in GitHub Desktop.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import csv
import json
import os
__author__ = "Arjun Ray"
__license__ = "MIT"
__email__ = "arjun.ray@generalassemb.ly"
def calc_req_completion(req_data):
    '''
    Return the completion fraction of all spec items in a req_data dict as
    produced by iterate_requirements, both overall and per category.

    ARGS:
        req_data (dict)     Maps 'CATEGORY: spec' strings to a boolean
                            (True if the spec was fulfilled)

    RETURNS:
        total_result (float)        Fraction (in range [0, 1]) of specs
                                    completed overall; 0.0 when req_data
                                    is empty
        category_results (dict)     Maps 'percent complete - CATEGORY' to
                                    the fraction completed in that category
    '''
    categories = {}
    total_specs = 0.0
    total_passed = 0.0
    for key, value in sorted(req_data.items()):
        # the category is the text before the first ':' in the spec key
        cat = key.split(':')[0]
        if cat not in categories:
            categories[cat] = {
                'total': 0.0,
                'passed': 0.0
            }
        categories[cat]['total'] += 1
        # booleans count as 1 (True) or 0 (False)
        categories[cat]['passed'] += value
        total_specs += 1
        total_passed += value
    category_results = {}
    for key, val in categories.items():
        key_name = 'percent complete - {}'.format(key)
        category_results[key_name] = val['passed'] / val['total']
    # guard against division by zero when no specs are present
    total_result = total_passed / total_specs if total_specs else 0.0
    return total_result, category_results
def iterate_requirements(csv_headers, raw_reqs):
    '''
    Iterate through requirements sections of an individual student's JSON
    data and gather the fulfilled-or-not value per spec. Also adds any new
    spec headers to csv_headers. Data structure expected as generated by
    project-eval-server.

    ARGS:
        csv_headers (list)      List of csv headers to update
        raw_reqs (dict)         Values from JSON data per student inside of
                                'requirements' key

    RETURNS:
        csv_headers (list)      Updated list of csv headers
        req_data (dict)         Single string representation of each spec
                                along with value True or False if completed
                                or not, respectively
    '''
    # stores the single string representation of each spec as the key,
    # fulfilled or not as the value
    req_data = {}
    for category, contents in raw_reqs.items():
        # only the per-spec result dicts are needed, not their keys
        for result in contents.values():
            fulfilled = result['fulfilled']
            # generate key for req_data
            req_string = '{}: {}'.format(category.upper(), result['spec'])
            # register any spec header not seen before
            if req_string not in csv_headers:
                csv_headers.append(req_string)
            req_data[req_string] = fulfilled
    return csv_headers, req_data
def parse_student(csv_headers, file_path, cutoff):
    '''
    Take the path to a student's JSON outbox file, along with the spec
    completion cutoff, and return a row of data for that student.

    ARGS:
        csv_headers (list)      List of csv headers to update
        file_path (str)         Full path to student's JSON outbox file
        cutoff (float)          Percentage cutoff of spec completion
                                percentage used to determine PASS or FAIL

    RETURNS:
        csv_headers (list)      Updated list of csv headers
        csv_row (list)          Row data for student to append to out_file
                                csv
    '''
    # empty list to store row of student data
    csv_row = []
    # parse the student's JSON file directly from the file handle
    with open(file_path, 'r') as f:
        data = json.load(f)
    # generate spec data for the student
    csv_headers, req_data = iterate_requirements(csv_headers,
                                                 data['requirements'])
    # fixed, non-spec columns come first
    csv_row.append(data['developer-first-name'])
    csv_row.append(data['developer-last-name'])
    csv_row.append(data['developer-email'])
    csv_row.append(data['notes'])
    # calculate percentage of spec complete
    total_percentage, percentage_complete = calc_req_completion(req_data)
    csv_row.append(total_percentage)
    # NOTE: strictly-greater comparison — a student exactly at the cutoff
    # does not pass (matches original behavior; confirm intent)
    csv_row.append(total_percentage > cutoff)
    # insert per-category percent-complete headers after the 6 fixed columns
    for category in percentage_complete:
        if category not in csv_headers:
            csv_headers.insert(6, category)
    # add per-spec category percent complete data to req_data
    req_data.update(percentage_complete)
    # append spec values in the same order as the spec headers
    for req in csv_headers[6:]:
        csv_row.append(req_data[req])
    return csv_headers, csv_row
def write_csv(csv_headers, csv_data, out_file):
    '''
    Write the header row and all data rows to a csv file.

    ARGS:
        csv_headers (list)      Column headers written as the first row
        csv_data (list)         List of lists where each sublist is a row
        out_file (string)       File to write output csv to
    '''
    # newline='' is required by the csv module so it controls line endings
    # itself (prevents blank lines between rows on Windows)
    with open(out_file, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(csv_headers)
        writer.writerows(csv_data)
def print_summary_data(csv_headers, csv_row):
    '''
    Print a human-readable summary of one student row as generated by
    parse_student.

    ARGS:
        csv_headers (list)      String headers that correspond with each
                                element in csv_row
        csv_row (list)          Row of mixed data that corresponds with each
                                header
    '''
    prefix = 'percent complete - '
    # create a dict of data per student
    data_dict = dict(zip(csv_headers, csv_row))
    # print the full name and passing status
    print('{first_name} {last_name}\n Pass: {meets}'.format(**data_dict))
    for k, v in data_dict.items():
        if k == 'percentage meets':
            # overall completion percentage
            print(' Percentage Complete: {}'.format(round(v, 2)))
        elif k.startswith(prefix):
            # slice the prefix off rather than use lstrip(), which strips a
            # *character set* and would eat leading category letters that
            # happen to appear in the prefix string
            print(' + {}: {}'.format(k[len(prefix):], round(v, 2)))
    print('\n')
def main(root_dir, cutoff, out_file):
    '''
    Give basic summary data on projects graded via project-eval-server,
    including percentage completion of spec and per-spec Pass/Fail data,
    and save the data as a csv.

    ARGS:
        root_dir (str)          Directory containing per-student JSON
                                outbox files
        cutoff (float)          Completion fraction in (0, 1] used as the
                                PASS/FAIL threshold
        out_file (str)          File to write output csv to

    RAISES:
        ValueError              If root_dir is not a valid directory or
                                cutoff is outside (0, 1]
    '''
    # validate inputs with explicit raises rather than assert, which is
    # stripped when Python runs with -O
    if not (os.path.exists(root_dir) and os.path.isdir(root_dir)):
        raise ValueError('"{}" is not a valid directory'.format(root_dir))
    if not 0.0 < cutoff <= 1.0:
        raise ValueError(
            'cutoff value of {} not between 0 and 1'.format(cutoff))
    # print args
    print('ARGUMENTS \n{}'.format('-' * 80))
    print('ROOT_DIR: "{}"'.format(root_dir))
    print('CUTOFF: {}'.format(cutoff))
    print('OUT_FILE: "{}"'.format(out_file))
    print('\n')
    # get list of .json files in root_dir ('name' avoids shadowing builtins)
    files = [name for name in os.listdir(root_dir) if name.endswith('.json')]
    # print files found
    print('FILES FOUND IN "{}"\n{}'.format(root_dir, '-' * 80))
    for name in files:
        print('+ {}'.format(name))
    print('\n')
    # list to hold rows of data per student
    csv_data = []
    # set up generic csv headers. spec-specific headers will be added to this
    csv_headers = [
        'first_name',
        'last_name',
        'email',
        'notes',
        'percentage meets',
        'meets',
    ]
    for name in files:
        # generate file path for student file
        file_path = os.path.join(root_dir, name)
        # print file being parsed
        print('READING FILE "{}"\n{}'.format(file_path, '-' * 80))
        # get a row of data for a given student and add it to csv_data
        csv_headers, csv_row = parse_student(csv_headers, file_path, cutoff)
        csv_data.append(csv_row)
        # print summary of student data
        print_summary_data(csv_headers, csv_row)
    # write results to csv
    write_csv(csv_headers, csv_data, out_file)
    # give csv info
    print('WROTE RESULTS TO CSV\n{}\n File Name: "{}"'
          .format('-' * 80, out_file))
if __name__ == '__main__':
    # build the command-line interface; defaults appear in --help output
    # thanks to the ArgumentDefaultsHelpFormatter
    parser = argparse.ArgumentParser(
        description=(
            'Give basic summary data on projects graded via '
            'project-eval-server including percentage completion of spec, '
            'per spec Pass Fail data and save data as csv'
        ),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '-r',
        action='store',
        dest='root_dir',
        type=str,
        default='./projects/game/json-outbox/',
        help=(
            'Directory containing JSON files that are generated by grading. '
            'Usually in "projects/<project-name>/json-outbox/"'
        ),
    )
    parser.add_argument(
        '-c',
        dest='cutoff',
        type=float,
        default=0.8,
        help=(
            'Cutoff of percentage of project spec to use as PASS/FAIL '
            'threshold.'
        ),
    )
    parser.add_argument(
        '-o',
        dest='out_file',
        type=str,
        default='./results.csv',
        help='File to save csv results to',
    )
    args = parser.parse_args()
    main(**vars(args))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment