Import UNICEF IF Workplan into Pivotal Tracker
## import_stories.rb
# Imports stories from the UNICEF Workplan into Pivotal Tracker from a CSV file.
### CSV file format
# Export the UNICEF workplan as CSV.
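# The parser below assumes the exported workplan has (at least) these columns:
#   first column  (row[0]) - row type/identifier: "Quarter N", "Milestone N", or an activity name
#   second column (row[1]) - title
#   third column  (row[2]) - description
#   fifth column  (row[4]) - budget in ETH
# Any other columns are ignored.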
### Usage
# DATA_FILE="path/to/workplan.csv" TRACKER_AUTHOR="David Wilkie" ruby import_stories.rb
# Running this script creates a new file 'unicef_workplan_pivotal_tracker_import.csv',
# which can then be imported into Pivotal Tracker.
#### Docker
# docker run --rm -it -v `pwd`:`pwd` -w `pwd` -e DATA_FILE="path/to/workplan.csv" -e TRACKER_AUTHOR="David Wilkie" ruby:alpine ruby import_stories.rb

require "csv"
require "pathname"
require "date"
require "bigdecimal"

DATA_FILE = ENV.fetch("DATA_FILE")
TRACKER_AUTHOR = ENV["TRACKER_AUTHOR"]
EPICS_PREFIX = "UNICEF IF2022"

TRACKER_STORY_HEADERS = [
  "Id", "Title", "Labels", "Type", "Estimate", "Current State", "Created at", "Accepted at",
  "Deadline", "Requested By", "Owned By", "Description", "Comment"
].freeze

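# Represents a single Pivotal Tracker story. Quarters and milestones are imported
# as "epic" stories; workplan activities are imported as "feature" stories attached
# to their milestone via the epic field.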
Story = Struct.new(:name, :labels, :title, :description, :type, :budget, :epic, :stories, keyword_init: true)

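# Parses the workplan CSV into a flat list of Story objects, grouping rows into a
# Quarter -> Milestone -> Activity hierarchy based on the first column of each row.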
def parse_workplan
  csv_text = Pathname.new(DATA_FILE).read
  csv = CSV.parse(csv_text, headers: false)
  current_quarter = nil
  current_milestone = nil
  activities = []
  stories = csv.each_with_object([]) do |row, result|
    row_type = row[0].to_s.downcase.strip
    if row_type =~ /quarter/
      quarter_number = row_type.match(/\d+/)[0]
      name = "#{EPICS_PREFIX} Q#{quarter_number}"
      current_quarter = Story.new(
        name: name,
        title: name,
        labels: [name],
        budget: BigDecimal(row[4]),
        type: "epic",
        stories: []
      )
      result << current_quarter
    elsif row_type =~ /milestone/
      milestone_number = row_type.match(/\d+/)[0]
      name = "#{current_quarter.name}M#{milestone_number}"
      title = row[1].to_s.strip
      current_milestone = Story.new(
        name: name,
        title: "#{name} - #{title}",
        epic: current_quarter,
        labels: [name],
        description: title,
        budget: BigDecimal(row[4]),
        type: "epic",
        stories: []
      )
      current_quarter.stories << current_milestone
      result << current_milestone
    elsif row_type =~ /activity/
      current_activity = Story.new(
        epic: current_milestone,
        labels: [EPICS_PREFIX] + current_quarter.labels + current_milestone.labels,
        name: row[0].to_s.strip,
        title: row[1].to_s.strip,
        description: row[2].to_s.strip,
        type: "feature"
      )
      current_milestone.stories << current_activity
      activities << current_activity
      result << current_activity
    end
  end

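  # Split each milestone's budget evenly across its activities.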
  activities.each do |activity|
    activity.budget = activity.epic.budget / activity.epic.stories.size
  end

  stories
end

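# Builds a Markdown budget comment for a story: the ETH amount linked to a Google
# search for its USD value, plus the author and today's date when TRACKER_AUTHOR is set.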
def build_budget_comment(story)
  budget_text = if story.budget.zero?
    "Budget\n0 ETH"
  else
    "Budget\n[#{story.budget.to_f} ETH](https://www.google.com/search?q=#{story.budget.to_f}+eth+in+usd)"
  end

  return budget_text if TRACKER_AUTHOR.nil?

  "#{budget_text} (#{TRACKER_AUTHOR} - #{Date.today.strftime})"
end

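# Writes the parsed stories to unicef_workplan_pivotal_tracker_import.csv, one row
# per story, using the Pivotal Tracker import headers defined above; the budget
# comment goes into the Comment column.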
def generate_csv(stories)
  CSV.open("unicef_workplan_pivotal_tracker_import.csv", "wb") do |csv|
    csv << TRACKER_STORY_HEADERS
    stories.each do |story|
      csv << [
        nil,                        # Id
        story.title,                # Title
        story.labels.join(","),     # Labels
        story.type,                 # Type
        -1,                         # Estimate
        nil,                        # Current State
        nil,                        # Created at
        nil,                        # Accepted at
        nil,                        # Deadline
        nil,                        # Requested By
        nil,                        # Owned By
        story.description,          # Description
        build_budget_comment(story) # Comment
      ]
    end
  end
end

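# Entry point: parse the workplan and write the Tracker import file.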
stories = parse_workplan
generate_csv(stories)