Last active
August 26, 2016 06:28
-
-
Save suriyadeepan/f2c4baeccb7a0f5eb74ab4b3ab716997 to your computer and use it in GitHub Desktop.
Scrape a wiki page to get the other wiki articles that are cited in it, using Beautiful Soup
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Scrape a Wikipedia article and list the other articles linked from its body text."""
from bs4 import BeautifulSoup
import requests

url = 'https://en.wikipedia.org/wiki/Transhumanism'

# Fetch the page; fail fast on HTTP errors instead of silently parsing an error page.
response = requests.get(url)
response.raise_for_status()

# Parse with the lxml parser (explicit choice avoids bs4's "no parser specified" warning).
soup = BeautifulSoup(response.content, 'lxml')

# Gather every <a> tag that appears inside a paragraph.
# find_all is the modern bs4 name; findAll is the deprecated BeautifulSoup 3 alias.
a_tags = []
for p_tag in soup.find_all('p'):
    a_tags.extend(p_tag.find_all('a'))

# Keep only genuine article links: those carrying both a title and an href attribute
# (filters out citation anchors like [1], which lack a title).
a_tags = [a_tag for a_tag in a_tags
          if 'title' in a_tag.attrs and 'href' in a_tag.attrs]

# Print an indexed listing: [i] title -> href
for i, a_tag in enumerate(a_tags):
    print('[{0}] {1} -> {2}'.format(i, a_tag.get('title'), a_tag.get('href')))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment