
@prgrmr-yn
Last active July 31, 2025 01:13
These scripts extract the download count for a single shortcut and the shortcuts/downloads stats for a user from RoutineHub pages.

To get them working, run this command to install the dependencies:

pip install requests beautifulsoup4 lxml

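The first script fetches an individual shortcut page and prints its download count: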
import requests
from bs4 import BeautifulSoup

# URL of the page you want to fetch (shortcut page)
url = 'https://routinehub.co/shortcut/15011/'

# Headers to mimic a browser request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Send a GET request to the URL with custom headers
response = requests.get(url, headers=headers)

# Check if the request was successful (HTTP status code 200)
if response.status_code == 200:
    # Parse the page content with BeautifulSoup using 'lxml' as the parser
    soup = BeautifulSoup(response.text, 'lxml')

    # Use a CSS selector to find the element with id 'count'
    count_div = soup.select_one('#count')
    if count_div:
        print("Count:", count_div.text.strip())  # Print the text content of the div
    else:
        print("Couldn't find the count section.")
else:
    print(f"Failed to retrieve page. Status code: {response.status_code}")
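If you need the count as a number rather than raw text, a regex can pull the digits out. This is a minimal sketch, assuming the element's text contains the figure as digits (possibly with comma separators); the helper name parse_count is mine, not from the gist.

import re

def parse_count(text):
    # Grab the first run of digits (allowing comma separators) and
    # strip the commas before converting, e.g. "12,345 downloads" -> 12345
    match = re.search(r'\d[\d,]*', text)
    return int(match.group().replace(',', '')) if match else None

The second script does the same for a user's profile page, grabbing the sidebar stats block instead: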
import requests
from bs4 import BeautifulSoup

# Username whose stats you want to fetch (hardcoded here; swap in input() to prompt for it)
username = "prgrmr"

# URL of the user page, built from the username
url = f'https://routinehub.co/user/{username}'

# Headers to mimic a browser request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Send a GET request to the URL with custom headers
response = requests.get(url, headers=headers)

# Check if the request was successful (HTTP status code 200)
if response.status_code == 200:
    # Parse the page content with BeautifulSoup using 'lxml' as the parser
    soup = BeautifulSoup(response.text, 'lxml')

    # Use a CSS selector to find the sidebar stats element
    stats_div = soup.select_one('#content > div > div > div.column.sidebar.is-2 > div.stats')
    if stats_div:
        print(stats_div.text.strip())  # Print the text content of the div
    else:
        print("Couldn't find the stats section.")
else:
    print(f"Failed to retrieve page. Status code: {response.status_code}")
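The stats block prints as free text; to turn it into labelled numbers, you can pair each label with the digits next to it. A sketch under the assumption that the block renders as alternating labels and values such as "Shortcuts 12 Downloads 3,456"; the exact layout of the RoutineHub sidebar may differ, and the helper name parse_stats is mine.

import re

def parse_stats(text):
    # Pair every "Label 1,234"-style run into a dict,
    # e.g. "Shortcuts 12 Downloads 3,456" -> {"Shortcuts": 12, "Downloads": 3456}
    stats = {}
    for label, value in re.findall(r'([A-Za-z]+)\s+([\d,]+)', text):
        stats[label] = int(value.replace(',', ''))
    return stats

# Example usage with the soup from above:
# parse_stats(stats_div.text)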