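"""Wikipedia "Getting to Philosophy" crawler.

Starting from a random Wikipedia article, repeatedly follow the first
link in the article body until the Philosophy article is reached, the
chain revisits an article it has already seen, an article has no links,
or the step limit is exceeded.
"""
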
import time
import urllib.parse

import bs4
import requests

start_url = "https://en.wikipedia.org/wiki/Special:Random"
target_url = "https://en.wikipedia.org/wiki/Philosophy"

def find_first_link(url):
    response = requests.get(url)
    html = response.text
    soup = bs4.BeautifulSoup(html, "html.parser")

    # This div contains the article's body
    # (Oct 2017 note: the body is nested inside two div tags)
    content_div = soup.find(id="mw-content-text").find(class_="mw-parser-output")

    # Stores the first link found in the article; if the article contains
    # no links, this value stays None
    article_link = None

    # Find all the direct children of content_div that are paragraphs
    for element in content_div.find_all("p", recursive=False):
        # Find the first anchor tag that's a direct child of a paragraph.
        # This skips other link types (citations, infobox links, etc.),
        # since those aren't direct children; they sit in divs of
        # various classes. Paragraphs without a direct anchor child are
        # skipped rather than crashing on a missing tag.
        anchor = element.find("a", recursive=False)
        if anchor:
            article_link = anchor.get("href")
            break

    if not article_link:
        return None

    # Build a full URL from the relative article_link URL
    first_link = urllib.parse.urljoin("https://en.wikipedia.org/", article_link)

    return first_link

def continue_crawl(search_history, target_url, max_steps=25):
    if search_history[-1] == target_url:
        print("We've found the target article!")
        return False
    elif len(search_history) > max_steps:
        print("The search has gone on suspiciously long, aborting search!")
        return False
    elif search_history[-1] in search_history[:-1]:
        print("We've arrived at an article we've already seen, aborting search!")
        return False
    else:
        return True

article_chain = [start_url]

while continue_crawl(article_chain, target_url):
    print(article_chain[-1])

    first_link = find_first_link(article_chain[-1])
    if not first_link:
        print("We've arrived at an article with no links, aborting search!")
        break

    article_chain.append(first_link)

    time.sleep(2)  # Slow things down so as to not hammer Wikipedia's servers
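
# Note: the usual "Getting to Philosophy" rules also skip links inside
# parentheses and italics; this script does not filter those, so it may
# occasionally follow a pronunciation or language link instead.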