Created
June 24, 2020 01:47
-
-
Save brucebentley/41533ca9204f61a1b86ac0fdc485a14e to your computer and use it in GitHub Desktop.
A script that downloads r/wallpaper's hottest 100 images and cycles through them as a wallpaper! Please Note:
All of the credentials are hidden. Refer to this post to set them up and get the script working for yourself (expected setup time: 5–10 minutes): https://www.storybench.org/how-to-scrape-reddit-with-python/
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import praw | |
| from bs4 import BeautifulSoup | |
| from urllib.request import Request, urlopen | |
| from urllib.parse import quote | |
| import requests | |
| import sys | |
| import os | |
# Working directory where the downloaded wallpapers are saved.
os.chdir("/Users/student/Desktop/Background_Images")
print("Navigated to directory...")

# Reddit script-app credentials. Hard-coding secrets in source is a security
# risk, so prefer environment variables; the "HIDDEN" literals are the
# original placeholders and remain the fallback for backward compatibility.
SCRIPT = os.environ.get("REDDIT_CLIENT_ID", "HIDDEN")
SECRET = os.environ.get("REDDIT_CLIENT_SECRET", "HIDDEN")
NUM_IMAGES = 100  # how many "hot" submissions to pull from the subreddit

# Authenticated PRAW client (password grant for a script-type app).
reddit = praw.Reddit(
    client_id=SCRIPT,
    client_secret=SECRET,
    user_agent="ImageBot",
    username="LAcuber",
    password=os.environ.get("REDDIT_PASSWORD", "HIDDEN"),
)
subreddit = reddit.subreddit("wallpaper")
# Permalinks of the NUM_IMAGES hottest submissions, as absolute reddit URLs.
links = [
    "https://reddit.com" + submission.permalink
    for submission in subreddit.hot(limit=NUM_IMAGES)
]
# Scrape each submission page for direct i.redd.it image URLs.
images = []
for i, link_url in enumerate(links):
    sys.stdout.write(f"\rProcessing image {i+1} of {NUM_IMAGES}.")
    # Percent-encode everything past the scheme+host prefix (permalinks may
    # contain characters that are not URL-safe); the first 21 characters are
    # "https://reddit.com/r/" and must stay unescaped.
    encoded_url = link_url[:21] + quote(link_url[21:])
    req = Request(encoded_url, headers={"User-Agent": "Mozilla/5.0"})
    # Close the HTTP response deterministically (the original leaked it).
    with urlopen(req) as html_page:
        soup = BeautifulSoup(html_page, "lxml")
    for anchor in soup.find_all("a"):
        href = str(anchor.get("href"))
        # Keep only direct image links, skipping duplicates.
        if href.startswith("https://i.redd.it/") and href not in images:
            images.append(href)
# Download every scraped image; images under 200 KB are considered too
# low-resolution ("fuzzy") to use as wallpapers and are skipped.
MIN_IMAGE_BYTES = 200000
for i, image_url in enumerate(images):
    sys.stdout.write(f"\rWriting image {i+1} of {len(images)}.")
    # A timeout prevents a single stalled download from hanging the script.
    img_data = requests.get(image_url, timeout=30).content
    # Checking the size before writing avoids the original's
    # write-then-stat-then-delete cycle; the final files on disk are the same.
    if len(img_data) < MIN_IMAGE_BYTES:
        continue
    with open(f"image_{i+1}.jpg", "wb") as handler:
        handler.write(img_data)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment