Skip to content

Instantly share code, notes, and snippets.

@rjp
Created May 13, 2014 08:26
Show Gist options
  • Save rjp/294b1184eade4f898ba1 to your computer and use it in GitHub Desktop.
Save rjp/294b1184eade4f898ba1 to your computer and use it in GitHub Desktop.
Bufferme
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Coded poorly by Brett Kelly
# http://nerdgap.com
# @inkedmn
import urllib2
import urllib
import re
import clipboard
import urlparse
import notification
import webbrowser
import requests
from bs4 import BeautifulSoup
import sys
import traceback
# Suffix appended to the page title before it is sent to Buffer
# (currently empty; makeBufferUrl concatenates it onto the title).
post_sep = ""
def de_utmise(a):
    """Return *a* with all Google Analytics ``utm_*`` query params removed.

    a -- a URL string; scheme, netloc, path, params and fragment are
         preserved, only the query string is rewritten.
    """
    parts = urlparse.urlparse(a)
    query = urlparse.parse_qs(parts.query)
    # BUG FIX: snapshot the keys before popping — mutating a dict while
    # iterating its live key view blows up on Python 3 and only worked on
    # Python 2 because keys() returned a list there.
    for key in list(query.keys()):
        if re.match(r'^utm', key):
            query.pop(key, None)
    # BUG FIX: parse_qs returns {key: [value, ...]}; without doseq the
    # list repr itself got urlencoded (a=%5B%271%27%5D). doseq=True emits
    # one key=value pair per list element instead.
    cleaned = urllib.urlencode(query, doseq=True)
    return urlparse.urlunparse(
        [parts.scheme, parts.netloc, parts.path, parts.params, cleaned, parts.fragment]
    )
def makeBufferUrl(url, title):
    """Cobble together the Buffer URL containing our datums.

    url   -- the page URL to share
    title -- the page title, or None when no title could be found

    Returns a ``bufferapp://`` URL with the title (``t``) and url (``u``)
    percent-escaped as query parameters.
    """
    # BUG FIX: the None guard must run before any string operations —
    # the original tested `'—' in title` first, which raises TypeError
    # when title is None.
    if title is None:
        title = "[no title]"
    # em dashes avoid escaping in the quote() call, so
    # maybe we just get rid of them.
    if '—' in title:
        title = title.replace('—', '—')
    title = title.replace('»', '—')
    title += post_sep
    nt = "\n\n« %s »\n" % (title)
    out = "bufferapp://"
    # UTF-8 encode and escape the page title
    t = urllib.quote(nt.encode('utf-8'), safe='')
    out += "?t=%s" % t
    # Escape the URL
    u = urllib.quote(url, safe='')
    out += "&u=%s" % u
    return out
# Pull whatever is on the clipboard and treat it as a candidate URL.
u = clipboard.get()
# urlparse never raises on malformed input, so we inspect the scheme
# ourselves: http://docs.python.org/2/library/urlparse.html
parts = urlparse.urlparse(u)
if parts.scheme not in ('http', 'https'):
    # Not a web URL — notify the user and bail out.
    notification.schedule("Clipboard is not a URL")
    print(u)
    print("")
    raise SystemExit
else:
    # Regex fallback for extracting <title> by hand (currently unused;
    # BeautifulSoup below does the actual parsing).
    p = re.compile("<title>(.*)</title>", re.IGNORECASE | re.DOTALL)
    try:
        # Strip tracking params, then fetch the page body.
        u = de_utmise(u)
        r = requests.get(u)
        h = r.text
        try:
            soup = BeautifulSoup(h)
            t = soup.title
            t = "<no title>" if t is None else t.string
            print(t)
        except Exception as e:
            t = "(no title somehow)"
        # Build the bufferapp:// URL and hand it off to the Buffer app.
        burl = makeBufferUrl(u, t.strip())
        webbrowser.open(burl)
    except Exception as e:
        # Opening or reading the URL failed — dump what we know so the
        # user can see what was on the clipboard and what went wrong.
        print(u)
        print(e)
        print('[Title Unavailable]')
        t_, v_, tb_ = sys.exc_info()
        traceback.format_exc(tb_)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment