Last active
January 28, 2019 21:13
-
-
Save TheMeanCanEHdian/01b9d3f4b90d102e829a87ab6de5c71e to your computer and use it in GitHub Desktop.
Grabs all release groups from Sonarr history for the most recently downloaded file per episode
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from __future__ import print_function
import requests

# Sonarr connection settings -- point URL at your instance and set the API key.
URL = "https://example.com/sonarr"
api = "APIKEY"

# Accumulators filled in by the stages below:
# series ids, episode ids with a file on disk, most-recent sourceTitle per
# episode, and a release-group -> count tally.
seriesIDs, episodeIDs, history, releasegroups = [], [], [], {}
# Stage 1: fetch every series known to Sonarr and record its internal id.
# (The original message said "seasons", but /api/series returns series.)
print("Grabbing series")
r = requests.get(url=URL + "/api/series", params={'apiKey': api})
r.raise_for_status()  # fail loudly on HTTP errors instead of a cryptic JSON error
for x in r.json():
    seriesIDs.append(x['id'])
    print('+', end='')
print("\n" + str(len(seriesIDs)) + " series grabbed\n")
# Stage 2: for each series, collect the ids of episodes that have a file on
# disk -- only those can have a grabbed release in history.
print("Grabbing episodes")
counter = 0
for x in seriesIDs:
    # NOTE(review): Sonarr docs spell this query parameter 'seriesId' --
    # confirm the server accepts this casing.
    r = requests.get(url=URL + "/api/episode", params={'apiKey': api, 'seriesID': x})
    r.raise_for_status()  # fail loudly on HTTP errors instead of a cryptic JSON error
    for y in r.json():
        if y['hasFile']:  # idiomatic truthiness instead of "== True"
            episodeIDs.append(y['id'])
            print('+', end='')
            counter += 1
        else:
            print('-', end='')
print("\n" + str(counter) + " episodes on disk\n")
# Stage 3: grab the newest history record per episode and keep its
# sourceTitle (the release name that was downloaded).
print("Grabbing history")
counter = 0
for x in episodeIDs:
    r = requests.get(url=URL + "/api/history",
                     params={'apiKey': api, 'sortKey': 'date', 'episodeID': x})
    r.raise_for_status()  # fail loudly on HTTP errors instead of a cryptic JSON error
    records = r.json()['records']
    if records:  # truthy check instead of "not len(...) == 0"
        # Assumes records are returned newest-first for sortKey=date --
        # TODO(review): confirm the default sortDir.
        history.append(records[0]['sourceTitle'])
        print('+', end='')
        counter += 1
    else:
        print('-', end='')
print("\n" + str(counter) + " items found with history\n")
# Stage 4: run each release title through Sonarr's /api/parse endpoint and
# tally the release groups it extracts (case-insensitively).
print("Parsing history")
counter = 0
counter2 = 0  # titles whose request/parse raised and were skipped
for x in history:
    try:
        # NOTE(review): 'pageSize' looks unused by /api/parse -- confirm.
        r = requests.get(url=URL + "/api/parse", params={'apiKey': api, 'title': x, 'pageSize': 1})
        data = r.json()
        release = data['parsedEpisodeInfo'].get('releaseGroup')
        if release is not None:
            # Normalise case so "GroupA" and "groupa" count as one group.
            release = release.lower()
            if release in releasegroups:
                releasegroups[release] += 1
                print('.', end='')
            else:
                releasegroups[release] = 1
                print('+', end='')
        # Entries with no release group print no marker but still count as parsed.
        counter += 1
    except Exception:  # narrowed from bare "except:" so Ctrl-C/SystemExit still propagate
        print('!', end='')
        counter2 += 1
print("\n" + str(counter) + " history entries parsed | " + str(counter2) + " were skipped")
# Final report: release groups sorted by count (desc), name as tie-breaker.
# The original used dict.iteritems() and a tuple-unpacking lambda -- both are
# Python 2-only and a SyntaxError under Python 3, defeating the script's own
# print_function import; items() plus indexing works on both. Also dropped the
# stray trailing colon from the output format string.
for key, value in sorted(releasegroups.items(), key=lambda kv: (kv[1], kv[0]), reverse=True):
    print("%s: %s" % (key, value))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment