To get this working, first install the dependencies:

```
pip install requests beautifulsoup4 lxml
```
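Before running the scripts below, you can confirm that all three packages are importable with a quick one-liner (it exits silently if everything installed correctly):

```
python -c "import requests, bs4, lxml"
```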
```python
import requests
from bs4 import BeautifulSoup

# URL of the page you want to fetch (shortcut page)
url = 'https://routinehub.co/shortcut/15011/'

# Headers to mimic a browser request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Send a GET request to the URL with custom headers
response = requests.get(url, headers=headers)

# Check if the request was successful (HTTP status code 200)
if response.status_code == 200:
    # Parse the page content with BeautifulSoup using 'lxml' as the parser
    soup = BeautifulSoup(response.text, 'lxml')

    # Use the CSS selector to find the element with id 'count'
    count_div = soup.select_one('#count')
    if count_div:
        print("Count:", count_div.text.strip())  # Print the text content of the div
    else:
        print("Couldn't find the count section.")
else:
    print(f"Failed to retrieve page. Status code: {response.status_code}")
```
The same approach works for scraping the stats block from a user's profile page:

```python
import requests
from bs4 import BeautifulSoup

# Username to look up (hard-coded here; see the sketch below for reading it from user input)
username = "prgrmr"

# URL of the user profile page, built from the username
url = f'https://routinehub.co/user/{username}'

# Headers to mimic a browser request
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Send a GET request to the URL with custom headers
response = requests.get(url, headers=headers)

# Check if the request was successful (HTTP status code 200)
if response.status_code == 200:
    # Parse the page content with BeautifulSoup using 'lxml' as the parser
    soup = BeautifulSoup(response.text, 'lxml')

    # Use the CSS selector to find the stats element in the sidebar
    stats_div = soup.select_one('#content > div > div > div.column.sidebar.is-2 > div.stats')
    if stats_div:
        print(stats_div.text.strip())  # Print the text content of the div
    else:
        print("Couldn't find the stats section.")
else:
    print(f"Failed to retrieve page. Status code: {response.status_code}")
```