Last active
December 15, 2015 06:09
-
-
Save swaroopsm/5214249 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Copyright (c) 2013 Swaroop SM <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# <http://www.gnu.org/licenses/>.
#
######################################################################
# Procedure to run the program.
#
# This Python script is a demo for the paper presentation that was
# prepared for CMRIT, Bangalore.
# Make sure BeautifulSoup is installed.
#
#   $ python cultura.py gaming
#
# E.g.: if the page that you want to scrape is named tech-quiz.html,
# then run:
#   $ python cultura.py tech-quiz
######################################################################
#!/usr/bin/python
import urllib2 | |
from bs4 import BeautifulSoup | |
import sys | |
url = "http://cultura13.com/"+sys.argv[1]+".html" | |
response = urllib2.urlopen(url) | |
info = response.read() | |
soup = BeautifulSoup(info) | |
def get_coordinators(): | |
content = soup.findAll('div', attrs={"class": "content"})[3].findAll("li") | |
print "\nCo-Ordinators:\n" | |
for i in content: | |
print i.text | |
print "\n" | |
def get_venue_time(): | |
content = soup.findAll('div', attrs={"class": "content"})[1].findAll("p") | |
print "\nVenue:\n" | |
for i in content: | |
print i.text | |
print "\n" | |
get_venue_time() | |
get_coordinators() |
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.