#!/usr/bin/env python3

import requests

# Your PagerDuty API key. A read-only key will work for this.
api_key = 'ENTER_YOUR_API_KEY'
# The API base URL
base_url = 'https://api.pagerduty.com'
# The service ID you want to query. Leave this blank to query all services.
service_id = ''
# The start date that you want to query.
since = '2016-06-28'
# The end date that you would like to search.
until = '2016-06-29'

headers = {
    'Authorization': 'Token token={0}'.format(api_key),
    'Content-type': 'application/json',
    'Accept': 'application/vnd.pagerduty+json;version=2'
}


def get_incidents(since, until, offset, service_id=None, total_incidents=None):
    # Default to None rather than a mutable [] so each top-level call starts
    # with a fresh list.
    if total_incidents is None:
        total_incidents = []
    params = {
        'since': since,
        'until': until,
        'offset': offset,
        'limit': 100
    }
    if service_id:
        params['service_ids[]'] = [service_id]
    # GET parameters belong in the query string, not in a JSON request body.
    r = requests.get(
        '{0}/incidents'.format(base_url),
        headers=headers,
        params=params
    )
    r.raise_for_status()
    response = r.json()
    total_incidents.extend(response['incidents'])
    # 'more' is true when another page of results remains; page through in
    # steps of 100 until it is exhausted.
    if response['more']:
        return get_incidents(since, until, offset + 100, service_id,
                             total_incidents)
    return total_incidents


def get_incident_details(incident_id, incident_number, service, file_name):
    start_time = ''
    end_time = ''
    summary = ''
    details = ''
    body = ''
    has_summary = False
    has_details = False
    has_body = False
    output = incident_number + ',' + service + ','
    r = requests.get(
        '{0}/incidents/{1}/log_entries?include[]=channels'.format(
            base_url, incident_id
        ),
        headers=headers
    )
    r.raise_for_status()
    for log_entry in r.json()['log_entries']:
        if log_entry['type'] == 'trigger_log_entry':
            # Keep the timestamp of the most recent trigger event.
            if log_entry['created_at'] > start_time:
                start_time = log_entry['created_at']
            if 'summary' in log_entry['channel']:
                has_summary = True
                summary = log_entry['channel']['summary']
            if 'details' in log_entry['channel']:
                has_details = True
                details = log_entry['channel']['details']
            if 'body' in log_entry['channel']:
                has_body = True
                body = log_entry['channel']['body']
        elif log_entry['type'] == 'resolve_log_entry':
            end_time = log_entry['created_at']
    output += start_time + ','
    output += end_time

    def sanitize(text):
        # Strip characters that would break the hand-rolled CSV layout.
        return (str(text).replace(',', '-').replace('"', "'")
                .replace('\n', '').replace('\r', ''))

    if has_summary:
        output += ',"' + sanitize(summary) + '"'
    if has_details:
        output += ',"' + sanitize(details) + '"'
    if has_body:
        output += ',"' + sanitize(body) + '"'
    output += '\n'
    # A context manager guarantees the file handle is closed after each write.
    with open(file_name, 'a') as f:
        f.write(output)


def main():
    if service_id != '':
        incidents = get_incidents(since, until, 0, service_id)
    else:
        incidents = get_incidents(since, until, 0)
    for incident in incidents:
        get_incident_details(
            incident['id'], str(incident['incident_number']),
            incident['service']['summary'],
            'pagerduty_export_' + since + '.csv'
        )
    print('Export completed successfully!')


if __name__ == '__main__':
    main()
Where do I set the location of the file export?
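The export path is just the file_name argument that main() passes to get_incident_details, so the CSV is appended to pagerduty_export_<since>.csv in whatever directory you run the script from. To write it somewhere else, pass a full path instead; a minimal sketch, with /tmp as a purely illustrative location:

get_incident_details(
    incident['id'], str(incident['incident_number']),
    incident['service']['summary'],
    '/tmp/pagerduty_export_' + since + '.csv'  # any absolute path works here
)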