A Python script that extracts SR&ED (Scientific Research and Experimental Development Tax Incentive Program) data from a Jira-generated JSON file. See http://www.cra-arc.gc.ca/txcrdt/sred-rsde/menu-eng.html for details on SR&ED. For each Jira story it gathers the "Start Date", "Resolution Date", and "# of People", and calculates the "Person Weeks".
# \\Y|M|
# ~~..'~
# ( \ )
# \ =/
# _\/_
# / -_
# < \
# \^-_-^\ \ iii
# <\v =======uu==>
# SR&ED DATA GENERATOR
# ====================
# 1.- Use the Jira REST API to grab the JSON data that belongs to a given Epic:
#     https://your-jira-instance.com/rest/api/2/issue/115515?expand=changelog
# 2.- Save the JSON object in a .json file in the same folder as this script
# 3.- Repeat #1 and #2 once for each Jira Epic you want to process
# 4.- Run this script
# 5.- The output data will be in output.csv, in the same folder.
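#
# A minimal sketch of step #1 in Python rather than a browser (an assumption,
# not part of the original workflow; it presumes the `requests` package and
# valid credentials for your Jira instance):
#
#   import requests
#   url = 'https://your-jira-instance.com/rest/api/2/issue/115515?expand=changelog'
#   resp = requests.get(url, auth=('your-user', 'your-password'))
#   with open('epic-115515.json', 'w') as fh:
#       fh.write(resp.text)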
import json
import math
import csv
import os
from datetime import datetime, timedelta
from pprint import pprint
# weeks_map: ISO week number -> list of people already credited for that week
weeks_map = dict()
# people_map: person -> total person-weeks across all processed stories
people_map = dict()
def dt_parse(t):
    # Parse a Jira timestamp like "2017-02-21T22:57:00.000-0500" and
    # normalize it to UTC by hand (Python 2's strptime has no usable %z).
    # Adapted from http://stackoverflow.com/questions/26165659/python-timezone-z-directive-for-datetime-strptime-not-available
    ret = datetime.strptime(t[0:16], '%Y-%m-%dT%H:%M')
    # The UTC offset is the trailing "+HHMM" / "-HHMM"
    sign, hours, minutes = t[-5], int(t[-4:-2]), int(t[-2:])
    if sign == '+':
        ret -= timedelta(hours=hours, minutes=minutes)
    elif sign == '-':
        ret += timedelta(hours=hours, minutes=minutes)
    return ret
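# For example (a quick sanity check, not part of the original script):
#   dt_parse('2017-02-21T22:57:00.000-0500') -> datetime(2017, 2, 22, 3, 57)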
def get_week(t):
    # ISO week number of the given timestamp
    return dt_parse(t).isocalendar()[1]
def get_weeks(d1, d2):
    # Returns the difference between two dates, in weeks.
    # The result is rounded up, so anything shorter than a week
    # counts as 1 week.
    if d1 is None or d2 is None:
        return 0
    d1 = dt_parse(d1)
    d2 = dt_parse(d2)
    return math.ceil((d2 - d1).total_seconds() / 604800)
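# For example, a story in progress from a Monday to the Wednesday of the
# following week spans 9 days, i.e. 777,600 seconds:
#   math.ceil(777600 / 604800.0) -> 2.0 weeks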
def get_people(list_of_people, start, end):
    # Returns a JSON string mapping each person to the number of
    # person-weeks they contributed between the start and end dates.
    out = dict()
    for person in list_of_people:
        out[person] = get_person_weeks(get_week(start), get_week(end), person)
    return json.dumps(out)
def get_person_weeks(start_week, end_week, person):
    # Counts the weeks in [start_week, end_week] for which this person has
    # not already been credited, so overlapping stories are not double-counted.
    # NOTE: ISO week numbers reset every year, so ranges that cross a year
    # boundary are not handled.
    worked = 0
    for week in range(start_week, end_week + 1):
        people_from_this_week = weeks_map.get(str(week))
        if people_from_this_week is None:
            people_from_this_week = []
        if person.lower() not in people_from_this_week:
            # Increase the counter for this person
            worked = worked + 1
            # Add the person to the array for this week
            people_from_this_week.append(person.lower())
            weeks_map[str(week)] = people_from_this_week
    # Increment the global summary
    if people_map.get(person) is None:
        people_map[person] = 0
    people_map[person] = people_map[person] + worked
    # Return the count for the given range
    return worked
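# For example (illustrative names, not real data): if 'alice' was already
# credited for week 8 by an earlier story, then
#   get_person_weeks(8, 9, 'Alice') -> 1   (only week 9 is newly counted)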
# Generate the CSV file and add headers to it
output = csv.writer(open("output.csv", "w"), delimiter=',', quoting=csv.QUOTE_ALL)
headers = [
    'epic_key',
    'story_key',
    # 'assigned_date',
    'into_progress_date',
    'resolution_date',
    # 'weeks',
    '# of people',
    'people'
]
output.writerow(headers)
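# A hypothetical row of output.csv (illustrative values only; QUOTE_ALL
# doubles the quotes inside the JSON "people" column):
#   "epic-115515.json","PROJ-42","2017-01-09T10:00:00.000-0500",
#   "2017-01-20T16:30:00.000-0500","2","{""alice"": 2, ""bob"": 2}"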
# Find the JSON files to be used as input
files = [f for f in os.listdir('.') if os.path.isfile(f)]
for f in files:
    if f.endswith('.json'):
        # Parse the JSON file
        with open(f) as data_file:
            try:
                data = json.load(data_file)
                # Go over the tickets that belong to a given epic
                for issue in data['issues']:
                    issue_key = issue['key']
                    issue_histories = issue['changelog']['histories']
                    resolution_date = issue['fields']['resolutiondate']
                    result = {
                        'epic_key': f,  # the file name stands in for the epic key
                        'issue_key': issue_key,
                        'assigned_date': None,
                        'into_progress_date': None,
                        'resolution_date': resolution_date,
                        'people': [],
                        'weeks': 0.0
                    }
                    # Go through the changelog for this particular ticket
                    for history in issue_histories:
                        for event in history['items']:
                            if event['field'] == 'status' and \
                                    event['fromString'] == 'To Do' and \
                                    event['toString'] == 'In Progress':
                                # When did we actually start working on this ticket?
                                into_progress_date = history['created']
                                result['into_progress_date'] = into_progress_date
                            elif event['field'] == 'assignee' and event['from'] is None:
                                # See when the assignment happened,
                                # and who worked on the ticket
                                assigned_date = history['created']
                                result['assigned_date'] = assigned_date
                                dev_person = event['to']
                                if dev_person.lower() not in result['people']:
                                    result['people'].append(dev_person.lower())
                            elif event['field'] == 'resolution':
                                # Who is the QA person?
                                qa_person = history['author']['name']
                                if qa_person.lower() not in result['people']:
                                    result['people'].append(qa_person.lower())
                    # Calculate the duration in weeks
                    result['weeks'] = get_weeks(result['into_progress_date'],
                                                result['resolution_date'])
                    # Add a row to the CSV file if applicable (both dates are
                    # needed, since get_people cannot handle a missing one)
                    if result['into_progress_date'] is not None and \
                            result['resolution_date'] is not None:
                        csv_row = [
                            result['epic_key'],
                            result['issue_key'],
                            # result['assigned_date'],
                            result['into_progress_date'],
                            result['resolution_date'],
                            # result['weeks'],
                            len(result['people']),
                            get_people(result['people'],
                                       result['into_progress_date'],
                                       result['resolution_date'])
                        ]
                        output.writerow(csv_row)
            except Exception as e:
                # Deal with errors and move along!
                print("We had a problem with " + f)
                print(e)

print('')
print('SUMMARY')
print('========')
pprint(people_map)