Last active
June 8, 2020 18:03
-
-
Save alyssadev/ec2616a746f7ad456bcb372b389e0563 to your computer and use it in GitHub Desktop.
A script that parses lawtechie's post history and shows, for each story that is linked to from another story, which stories link to it. Useful for collecting multi-part stories together. Stories with no inbound links (singletons) are not shown. More parsing work remains to be done in the future.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3
# Builds a link directory of /u/lawtechie's TalesFromTechSupport stories:
# which stories are linked to, and by which other stories.
from requests import get
from json import load,dump,dumps
from collections import defaultdict
from markdown import markdown
from lxml import etree
from time import sleep
# Output mode flag: True renders HTML anchors/lists, False renders plain
# text URLs. Set by main() from the --html command-line switch.
html = False
# Reddit search endpoint and the query for all of lawtechie's TFTS
# submissions, newest first. "after" is added to this dict while paginating.
url = "https://www.reddit.com/search.json"
query = {"q": "subreddit:talesfromtechsupport author:lawtechie", "sort": "new", "limit": "1000"}
def get_results():
    """Return the reddit search results, preferring the local cache file.

    On a cache miss, pages through the live search API (following the
    listing's "after" cursor), writes the combined results to the cache,
    and returns them.

    Returns:
        list: reddit "thing" dicts (the listing's ``children``).
    """
    cache_path = "lawtechie-search-20200609.json"
    try:
        with open(cache_path) as f:
            return load(f)
    except (OSError, ValueError):
        # No cache yet, or it is unreadable/corrupt JSON -- fetch fresh.
        # (The original bare `except:` also swallowed KeyboardInterrupt.)
        pass
    headers = {"User-Agent": "/u/lawtechie story directory by /u/suudo"}
    print("getting results")
    results = []
    after = None
    # Single fetch loop replaces the duplicated first-request + while-loop
    # request code of the original.
    while True:
        if after is not None:
            # NOTE: mutates the module-level query dict, as the original did.
            query["after"] = after
            print("getting after: {}".format(after))
        data = get(url, params=query, headers=headers).json()["data"]
        results += data["children"]
        print("added {} results".format(data["dist"]))
        after = data["after"]
        if after is None:
            break
        sleep(1)  # be polite to reddit between pages
    with open(cache_path, "w") as f:
        dump(results, f)
    return results
def l(i, alt=None):
    """Render story id *i* as a link.

    In HTML mode (module-global ``html``) this is an anchor whose text is
    *alt* (falling back to the id itself); otherwise it is the bare
    redd.it URL.
    """
    global html
    if not html:
        return "http://redd.it/" + i
    return "<a href='http://redd.it/{}'>{}</a>".format(i, alt or i)
def parse_url(url):
    """Extract the base36 reddit post id from a story URL.

    Handles /r/talesfromtechsupport comment permalinks and redd.it short
    links; returns None for any other URL (or a reddit.com URL without a
    /comments/ segment, which previously raised IndexError).
    """
    if "reddit.com/r/talesfromtechsupport" in url:
        if "/comments/" not in url:
            return None
        return url.split("/comments/", 1)[1].split("/", 1)[0]
    if "redd.it/" in url:
        # Take the id up to the next path/query/fragment separator instead
        # of the original hard-coded 6-char slice: reddit ids can be 7+
        # chars now, and a shorter id followed by "/" kept the slash.
        tail = url.split("redd.it/", 1)[1]
        for sep in ("/", "?", "#"):
            tail = tail.split(sep, 1)[0]
        return tail
    return None
def main(html_param=False):
    """Build and print the story-link directory.

    For every story in the search results, renders its markdown body,
    collects every outbound link to another story, and prints each linked
    story followed by the stories that link to it. Output is HTML
    definition-list markup when *html_param* is True, indented plain text
    otherwise.

    Returns 0 (process exit status).
    """
    global html
    html = html_param
    results = get_results()
    # id -> story data, for resolving a linked id to its title.
    stories = {story["data"]["id"]: story["data"] for story in results}
    story_links = defaultdict(list)
    for story in results:
        story = story["data"]
        # Wrap in <body> so the rendered markdown is a single XML root.
        mkdn = "<body>" + markdown(story["selftext"]) + "</body>"
        doc = etree.fromstring(mkdn)
        for link in doc.xpath("//a"):
            href = link.get("href")
            if not href:
                # An <a> without href would crash parse_url ("in None").
                continue
            dest_id = parse_url(href)
            if not dest_id:
                continue
            dest_title = stories.get(dest_id, {}).get("title", "UNKNOWN")
            story_links[l(dest_id, alt=dest_title)].append(l(story["id"], alt=story["title"]))
    for s,links in story_links.items():
        if html:
            print("<dt>\n {}\n</dt><dd><ul>\n{}\n</ul></dd>".format(s, "\n".join(" <li>{}</li>".format(link) for link in links)), end="")
        else:
            print("{}\n {}".format(s, "\n ".join(links)))
    if html:
        print()
    return 0
if __name__ == "__main__": | |
from sys import exit, argv | |
exit(main(html_param="--html" in argv)) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment