

Revisions

  1. Tod Karpinski revised this gist Apr 16, 2012. 1 changed file with 6 additions and 0 deletions.
    github_issues_to_csv.rb: 6 additions & 0 deletions
    @@ -24,6 +24,7 @@
    "Date created",
    "Date modified",
    "Issue type",
    "Milestone",
    "Priority",
    "Status",
    "Reporter"
    @@ -72,6 +73,10 @@
    when issue['labels'].to_s =~ /LOW/i
    priority = "Minor"
    end
    milestone = issue['milestone'] || "None"
    if (milestone != "None")
    milestone = milestone['title']
    end

    # Needs to match the header order above; date formats are based on Jira defaults
    row = [
    @@ -80,6 +85,7 @@
    DateTime.parse(issue['created_at']).new_offset(TIMEZONE_OFFSET).strftime("%d/%b/%y %l:%M %p"),
    DateTime.parse(issue['updated_at']).new_offset(TIMEZONE_OFFSET).strftime("%d/%b/%y %l:%M %p"),
    type,
    milestone,
    priority,
    issue['state'],
    issue['user']['login']
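
    This revision threads a "Milestone" column through the export. Below is a minimal sketch of the extraction it adds, assuming (as the diff implies) that the API returns an issue's milestone either as nil or as a hash with a 'title' key; milestone_name is a hypothetical helper name, not part of the gist:

        # Hypothetical helper mirroring the milestone handling added above.
        # Assumes issue['milestone'] is either nil or a hash containing 'title'.
        def milestone_name(issue)
          milestone = issue['milestone'] || "None"
          # A present milestone is a hash; the CSV column only needs its title.
          milestone = milestone['title'] if milestone != "None"
          milestone
        end

        # Used where the revision builds the row:
        # row = [..., type, milestone_name(issue), priority, ...]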
  2. @tkarpinski revised this gist Apr 12, 2012. 1 changed file with 31 additions and 48 deletions.
    github_issues_to_csv.rb: 31 additions & 48 deletions
    @@ -1,25 +1,23 @@
    require 'json'
    require 'open-uri'
    require 'octokit'
    require 'csv'
    require 'date'

    # Github credentials to access your private project
    USERNAME="myusername"
    PASSWORD="mypassword"
    USERNAME="USER_NAME"
    PASSWORD="SEKRIT"

    # Project you want to export issues from
    USER="someuser"
    PROJECT="someproject"
    USER="REPO_OWNER"
    PROJECT="REPO_NAME"

    # Your local timezone offset to convert times
    TIMEZONE_OFFSET="+10"
    TIMEZONE_OFFSET="-4"

    BASE_URL="https://github.com/api/v2/json/issues"
    client = Octokit::Client.new(:login => USERNAME, :password => PASSWORD)

    csv = CSV.new(File.open(File.dirname(__FILE__) + "/issues.csv", 'w'))
    csv = CSV.new(File.open(File.dirname(__FILE__) + "/issues3.csv", 'w'))

    puts "Initialising CSV file..."
    # CSV Headers
    #CSV Headers
    header = [
    "Summary",
    "Description",
    @@ -31,17 +29,29 @@
    "Reporter"
    ]
    # We need to add a column for each comment, so this dictates how many comments for each issue you want to support
    20.times { header << "Comments" }
    #20.times { header << "Comments" }
    csv << header

    puts "Getting issues from Github..."
    closed_issues = JSON.parse(open("#{BASE_URL}/list/#{USER}/#{PROJECT}/closed", 'r', { :http_basic_authentication => [USERNAME, PASSWORD] }).read)
    open_issues = JSON.parse(open("#{BASE_URL}/list/#{USER}/#{PROJECT}/open", 'r', { :http_basic_authentication => [USERNAME, PASSWORD] }).read)

    all_issues = closed_issues['issues'] + open_issues['issues']

    puts "Processing #{all_issues.size} issues..."
    all_issues.each do |issue|
    temp_issues = []
    issues = []
    page = 0
    begin
    page = page +1
    temp_issues = client.list_issues("#{USER}/#{PROJECT}", :state => "closed", :page => page)
    issues = issues + temp_issues;
    end while not temp_issues.empty?
    temp_issues = []
    page = 0
    begin
    page = page +1
    temp_issues = client.list_issues("#{USER}/#{PROJECT}", :state => "open", :page => page)
    issues = issues + temp_issues;
    end while not temp_issues.empty?


    puts "Processing #{issues.size} issues..."
    issues.each do |issue|
    puts "Processing issue #{issue['number']}..."
    # Work out the type based on our existing labels
    case
    @@ -72,34 +82,7 @@
    type,
    priority,
    issue['state'],
    issue['user']
    issue['user']['login']
    ]

    if issue['comments'] > 0
    puts "Getting #{issue['comments']} comments for issue #{issue['number']} from Github..."
    # Get the comments
    comments = JSON.parse(open("#{BASE_URL}/comments/#{USER}/#{PROJECT}/#{issue['number']}", 'r', { :http_basic_authentication => [USERNAME, PASSWORD] }).read)

    comments['comments'].each do |c|
    # Date format needs to match hard coded format in the Jira importer
    comment_time = DateTime.parse(c['created_at']).new_offset(TIMEZONE_OFFSET).strftime("%m/%d/%y %r")

    # Map usernames for the comments importer
    comment_user = case c['user']
    when "Foo"
    "foo"
    when "baruser"
    "bar"
    when "myfunnyusername"
    "firstname"
    end

    # Put the comment in a format Jira can parse, removing #s as Jira thinks they're comments
    comment = "Comment: #{comment_user}: #{comment_time}: #{c['body'].gsub('#','')}"

    row << comment
    end
    end

    csv << row
    end
    end
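
    This revision drops the bulk v2-API calls in favour of Octokit and pages through closed and open issues separately. A sketch of that paging pattern pulled into one helper follows; fetch_all_issues is a hypothetical name, while client.list_issues with :state and :page is the same call the diff itself makes:

        require 'octokit'

        # Hypothetical helper wrapping the begin/end-while paging loop above.
        def fetch_all_issues(client, repo, state)
          issues = []
          page = 0
          begin
            page += 1
            # Same Octokit-era call as in the revision: repo plus :state and :page.
            batch = client.list_issues(repo, :state => state, :page => page)
            issues += batch
          end while not batch.empty?
          issues
        end

        # issues = fetch_all_issues(client, "#{USER}/#{PROJECT}", "closed") +
        #          fetch_all_issues(client, "#{USER}/#{PROJECT}", "open")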
  3. @henare created this gist Jul 26, 2011.
    github_issues_to_csv.rb: 105 additions & 0 deletions
    @@ -0,0 +1,105 @@
    require 'json'
    require 'open-uri'
    require 'csv'
    require 'date'

    # Github credentials to access your private project
    USERNAME="myusername"
    PASSWORD="mypassword"

    # Project you want to export issues from
    USER="someuser"
    PROJECT="someproject"

    # Your local timezone offset to convert times
    TIMEZONE_OFFSET="+10"

    BASE_URL="https://github.com/api/v2/json/issues"

    csv = CSV.new(File.open(File.dirname(__FILE__) + "/issues.csv", 'w'))

    puts "Initialising CSV file..."
    # CSV Headers
    header = [
    "Summary",
    "Description",
    "Date created",
    "Date modified",
    "Issue type",
    "Priority",
    "Status",
    "Reporter"
    ]
    # We need to add a column for each comment, so this dictates how many comments for each issue you want to support
    20.times { header << "Comments" }
    csv << header

    puts "Getting issues from Github..."
    closed_issues = JSON.parse(open("#{BASE_URL}/list/#{USER}/#{PROJECT}/closed", 'r', { :http_basic_authentication => [USERNAME, PASSWORD] }).read)
    open_issues = JSON.parse(open("#{BASE_URL}/list/#{USER}/#{PROJECT}/open", 'r', { :http_basic_authentication => [USERNAME, PASSWORD] }).read)

    all_issues = closed_issues['issues'] + open_issues['issues']

    puts "Processing #{all_issues.size} issues..."
    all_issues.each do |issue|
    puts "Processing issue #{issue['number']}..."
    # Work out the type based on our existing labels
    case
    when issue['labels'].to_s =~ /Bug/i
    type = "Bug"
    when issue['labels'].to_s =~ /Feature/i
    type = "New feature"
    when issue['labels'].to_s =~ /Task/i
    type = "Task"
    end

    # Work out the priority based on our existing labels
    case
    when issue['labels'].to_s =~ /HIGH/i
    priority = "Critical"
    when issue['labels'].to_s =~ /MEDIUM/i
    priority = "Major"
    when issue['labels'].to_s =~ /LOW/i
    priority = "Minor"
    end

    # Needs to match the header order above; date formats are based on Jira defaults
    row = [
    issue['title'],
    issue['body'],
    DateTime.parse(issue['created_at']).new_offset(TIMEZONE_OFFSET).strftime("%d/%b/%y %l:%M %p"),
    DateTime.parse(issue['updated_at']).new_offset(TIMEZONE_OFFSET).strftime("%d/%b/%y %l:%M %p"),
    type,
    priority,
    issue['state'],
    issue['user']
    ]

    if issue['comments'] > 0
    puts "Getting #{issue['comments']} comments for issue #{issue['number']} from Github..."
    # Get the comments
    comments = JSON.parse(open("#{BASE_URL}/comments/#{USER}/#{PROJECT}/#{issue['number']}", 'r', { :http_basic_authentication => [USERNAME, PASSWORD] }).read)

    comments['comments'].each do |c|
    # Date format needs to match hard coded format in the Jira importer
    comment_time = DateTime.parse(c['created_at']).new_offset(TIMEZONE_OFFSET).strftime("%m/%d/%y %r")

    # Map usernames for the comments importer
    comment_user = case c['user']
    when "Foo"
    "foo"
    when "baruser"
    "bar"
    when "myfunnyusername"
    "firstname"
    end

    # Put the comment in a format Jira can parse, removing #s as Jira thinks they're comments
    comment = "Comment: #{comment_user}: #{comment_time}: #{c['body'].gsub('#','')}"

    row << comment
    end
    end

    csv << row
    end
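
    For reference, the two case expressions above that translate GitHub labels into Jira fields can be read as one small function. A sketch under that reading; jira_fields_for is a hypothetical helper name, while the label patterns and Jira values are exactly the ones hard-coded in the script:

        # Hypothetical helper collecting the label-to-Jira mapping above.
        def jira_fields_for(labels)
          text = labels.to_s
          type = case
                 when text =~ /Bug/i     then "Bug"
                 when text =~ /Feature/i then "New feature"
                 when text =~ /Task/i    then "Task"
                 end
          priority = case
                     when text =~ /HIGH/i   then "Critical"
                     when text =~ /MEDIUM/i then "Major"
                     when text =~ /LOW/i    then "Minor"
                     end
          [type, priority]
        end

        # type, priority = jira_fields_for(issue['labels'])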