import requests

# Current conditions for Austin, TX, from the Weather Underground API.
# The <<wunderground-apikey>> placeholder stands in for a real API key.
r = requests.get("http://api.wunderground.com/api/<<wunderground-apikey>>/conditions/q/TX/Austin.json")
c = r.json()['current_observation']
location = c['display_location']
print("\n".join(["{}: {}".format(location['full'], c['weather']),
                 "{}".format(c['local_time_rfc822']),
                 "{} / Feels like {}".format(c['temperature_string'],
                                             c['feelslike_string'])]))

import requests
import textwrap
from bs4 import BeautifulSoup

def story(link):
    """Fetch a single story from NPR's text-only site and wrap its
    paragraphs for an 80-column display."""
    r = requests.get("http://thin.npr.org{}".format(link)).text
    page = BeautifulSoup(r, 'html.parser')
    ps = []
    author = False
    wrapper = textwrap.TextWrapper(initial_indent=' ',
                                   subsequent_indent=' ', width=80)
    for p in page.find_all('p'):
        # Skip the page header: the article body starts at the byline.
        if not author:
            if p.get_text()[0:2] != "By":
                continue
            author = True
        ps.append(wrapper.fill(p.get_text()))
    return u"\n\n".join(ps)

headlines = requests.get("http://thin.npr.org").text
soup = BeautifulSoup(headlines, 'html.parser')
results = []
for link in soup.find_all('a'):
    href = link.get('href')
    # Story links on the index all point at s.php; anchors without an
    # href come back as None, so guard before testing membership.
    if href and "s.php" in href:
        result = u"*** {}\n\n{}\n".format(link.get_text(), story(href))
        results.append(result)
return "\n".join(results)

import feedparser

f = feedparser.parse("http://hosted2.ap.org/atom/APDEFAULT/3d281c11a96b4ad082fe88aa0db04305")
output = [u"*** Associated Press: {}".format(f.feed.title)]
for story in f.entries:
    # Render each entry as an org-mode link: [[url][title]].
    output.append(" - [[{}][{}]]".format(story.link, story.title))
return u'\n'.join(output)
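
# A defensive variant (sketch, not from the original): feedparser reports a
# malformed feed through its `bozo` flag rather than raising, so a check
# like this could run before f.feed or f.entries are touched.
f = feedparser.parse("http://hosted2.ap.org/atom/APDEFAULT/3d281c11a96b4ad082fe88aa0db04305")
if f.bozo:
    # bozo_exception records what went wrong (bad XML, unreachable host, ...).
    return u"*** Associated Press: feed unavailable ({})".format(f.bozo_exception)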

import requests
from bs4 import BeautifulSoup

r = requests.get("http://news.ycombinator.com")
soup = BeautifulSoup(r.text, 'html.parser')
things = []
for l in soup.find_all('a', attrs={'class': 'storylink'}):
    text = l.get_text()
    href = l['href']
    # Square brackets would break the org-mode [[url][title]] link
    # syntax, so swap them for parentheses.
    text = text.replace('[', '(').replace(']', ')')
    things.append("[[{}][{}]]".format(href, text))
return "\n".join(things)