Scrapes a Craigslist posting and returns an object with what it can find
""" | |
scrape_craigslist.py | |
requires libxml and BeautifulSoup | |
Created by Patrick Lemiuex on 2011-04-12. | |
MIT Open Source License | |
Copyright (c) 2009-2010 the original author or authors | |
Permission is hereby granted, free of charge, to any person obtaining a | |
copy of this software and associated documentation files (the "Software"), | |
to deal in the Software without restriction, including without limitation the | |
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
copies of the Software, and to permit persons to whom the Software is furnished | |
to do so, subject to the following conditions: | |
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. | |
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE | |
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR | |
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
""" | |
import htmlentitydefs
import re
import urllib2
from BeautifulSoup import BeautifulSoup, UnicodeDammit
def remove_html_tags(data):
    """Strip HTML tags and leading whitespace, then drop any non-ASCII."""
    data = re.sub(r'<.*?>', '', data)
    data = re.sub(r'^\s+', '', data)
    return data.encode('ascii', 'ignore')
def remove_extra_spaces(data):
    """Collapse any run of whitespace into a single space."""
    return re.sub(r'\s+', ' ', data)
def find_state_by_city_name(city):
    """Map a Craigslist region name to a US state abbreviation.

    Returns None for regions not in the table.
    """
    regions = {
        'new york': 'NY',
        'austin': 'TX',
        'chicago': 'IL',
        'SF bay area': 'CA',
        'los angeles': 'CA',
        'seattle': 'WA',
        'portland': 'OR',
    }
    return regions.get(city)
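# For example (given the table above, which covers only a few regions):
#   find_state_by_city_name('austin')  -> 'TX'
#   find_state_by_city_name('boston')  -> None (not in the table),
# so callers should expect a possible None state.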
def convert_html_entities(s):
    """Replace numeric and named HTML entities with unicode characters;
    also turn pipes into commas and drop '»' marks and stray 'nbsp' text."""
    s = re.sub('([»]+|nbsp)', '', s)
    s = re.sub(r'\|+', ',', s)
    # numeric entities, e.g. &#8212;
    for hit in set(re.findall(r'&#\d+;', s)):
        try:
            s = s.replace(hit, unichr(int(hit[2:-1])))
        except ValueError:
            pass
    # named entities; &amp; is swapped last so it cannot create new entities
    hits = set(re.findall(r'&\w+;', s))
    hits.discard('&amp;')
    for hit in hits:
        name = hit[1:-1]
        if name in htmlentitydefs.name2codepoint:
            s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name]))
    s = s.replace('&amp;', '&')
    return s
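# For example (an illustrative input, not from a real listing):
#   convert_html_entities('Beds &amp; baths &#8212; 2 of each')
# yields u'Beds & baths \u2014 2 of each', the em dash coming from its
# numeric entity and &amp; resolved last.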
def parse_listing(contents):
    """Use BeautifulSoup to parse a Craigslist listing page into a dict."""
    soup = BeautifulSoup(''.join(contents))
    o = {}
    # get the title, e.g. "$1500 / 2br - sunny apartment (Brooklyn) (map)"
    o['title'] = ''
    heading = soup.find('h2')
    heading = heading.text
    # find the price: the first run of digits in the heading
    price = 0.00
    m = re.search(r'\d+', heading)
    if m is not None:
        price = m.group(0)
    o['price'] = price
    o['title'] = heading
    o['title'] = o['title'].replace('$%s / ' % price, '')
    o['title'] = o['title'].replace('(map)', '')
    # get the city if it appears in the title, e.g. "(Brooklyn)"
    o['city'] = ''
    m = re.search(r'\((.*?)\)', str(o['title']))
    if m is not None:
        o['city'] = m.group(1)
        o['title'] = o['title'].replace('(%s)' % o['city'], '')
    o['title'] = convert_html_entities(o['title'])
    o['title'] = o['title'].replace('*', '')
    # find bedrooms from the title, e.g. "2br"
    o['rooms'] = 0
    o['bedrooms'] = 0
    m = re.search(r'(\d+)\s*br', str(o['title']), re.I)
    if m is not None:
        o['rooms'] = m.group(1)
        o['bedrooms'] = m.group(1)
    # the region name is the second link in the "bchead" banner,
    # e.g. "new york craigslist"
    region = soup.find('div', {'class': 'bchead'})
    region = str(region('a')[1]).replace(' craigslist', '').strip()
    # strip the html tags around the link text
    m = re.search(r'(?<=\>)[\w ]+', region)
    o['region'] = m.group(0)
    o['state'] = find_state_by_city_name(str(o['region']))
    # the "blurbs" list holds extra details (fee disclosure, pets, etc.)
    blurbs = soup.find('ul', {'class': 'blurbs'})
    body = soup.find('div', {'id': 'userbody'})
    body_str = str(body)
    try:
        body_str = body_str.encode('utf-8')
    except UnicodeError:
        body_str = decode_html(body_str)
    li = soup.find('ul')
    str_li = str(li)
    # broker listings carry a "Fee Disclosure:" blurb
    o['fee'] = re.search('Fee Disclosure:', str(blurbs)) is not None
    # find pets
    o['pets'] = re.search('(cats are OK|dogs are OK)', str(blurbs)) is not None
    # fall back to the "Location:" blurb when the title had no city
    if o['city'] == '':
        m = re.search(r'(?<=Location: )[\w\- ]+', str_li)
        if m is not None:
            o['city'] = str(m.group(0))
    # contact name from the "Listed By:" blurb
    o['contact_name'] = ''
    m = re.search(r'(?<=Listed By: )[\w\- ]+', str_li)
    if m is not None:
        o['contact_name'] = m.group(0)
    # clean up the description
    try:
        o['description'] = convert_html_entities(
            remove_extra_spaces(remove_html_tags(body_str))).encode('utf-8')
    except UnicodeError:
        o['description'] = body.text
    descrip = o['description']
    # remove stray utf-8 artifact bytes
    for ch in ('\xc2', '\xbb', '\xa0'):
        descrip = descrip.replace(ch, '')
    # remove asterisks
    descrip = descrip.replace('*', '')
    # lowercase SHOUTING words (two or more capitals in a row)
    for match in re.findall(r'[A-Z]{2}\w+', descrip):
        descrip = descrip.replace(match, match.lower())
    # collapse tabs and duplicate spaces into single spaces
    descrip = re.sub(r'(\t+|\s{2,})+', ' ', descrip)
    o['description'] = descrip
    # find bathrooms from the description, e.g. "2 Baths"
    m = re.search(r'(\d+)\s*[Bb]aths?', o['description'])
    if m is not None:
        o['bathrooms'] = m.group(1)
    # find a phone number like 212 555 1234, with optional extra digits
    o['contact_phone_1'] = ''
    m = re.search(r'(\d{3})\W*(\d{3})\W*(\d{4})\W*(\d*)', o['description'])
    if m is not None:
        o['contact_phone_1'] = m.group(0)[:15]
    # find an email address with a common top-level domain
    o['contact_email'] = ''
    m = re.search(r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.(com|edu|info|net|org)',
                  body_str)
    if m is not None:
        o['contact_email'] = str(m.group(0))
    return o
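# The returned dict carries roughly these keys (values are illustrative,
# not from a real listing); 'bathrooms' is only set when a "2 Baths"
# pattern appears in the description:
#   {'title': 'sunny apartment', 'price': '1500', 'city': 'Brooklyn',
#    'region': 'new york', 'state': 'NY', 'rooms': '2', 'bedrooms': '2',
#    'fee': False, 'pets': True, 'contact_name': '', 'contact_phone_1': '',
#    'contact_email': '', 'description': '...'}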
def decode_html(html_string):
    """Best-effort byte-string to unicode conversion via UnicodeDammit."""
    converted = UnicodeDammit(html_string, isHTML=True)
    if not converted.unicode:
        return html_string
    return converted.unicode
def test(url):
    """Fetch a listing URL, decode it, and parse it."""
    contents = urllib2.urlopen(url).read()
    try:
        contents = contents.decode('utf-8')
    except UnicodeError:
        contents = decode_html(contents)
    return parse_listing(contents)

#test('http://newyork.craigslist.org/brk/abo/2330895495.html')
#print find_state_by_city_name('new york')
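# A minimal driver sketch (not in the original gist): fetch and parse the
# author's example listing, then print a few fields. Whether the URL still
# resolves, and which fields come back populated, depends on the 2011-era
# Craigslist markup this parser targets.
if __name__ == '__main__':
    listing = test('http://newyork.craigslist.org/brk/abo/2330895495.html')
    for key in ('title', 'price', 'city', 'state', 'bedrooms', 'pets', 'fee'):
        print '%s: %s' % (key, listing[key])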