Skip to content

Instantly share code, notes, and snippets.

@sash13
Created June 26, 2012 21:22
Show Gist options
  • Select an option

  • Save sash13/2999170 to your computer and use it in GitHub Desktop.

Select an option

Save sash13/2999170 to your computer and use it in GitHub Desktop.
from urllib2 import (OpenerDirector,HTTPRedirectHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPErrorProcessor, HTTPCookieProcessor, HTTPError, HTTPSHandler )
import lxml.html
import urllib, urllib2, cookielib, os, time, MultipartPostHandler
import poster
# Path of the on-disk cookie jar persisted between runs (LWP format).
cook = '.cook'
# Google account credentials -- fill these in before running the script.
email_my = ''
pass_my = ''
# Mobile-browser User-Agent string so Google serves the lightweight m.* pages.
# NOTE(review): only used by the commented-out addheaders line below; as the
# script stands, the default Python-urllib User-Agent is actually sent.
use_a = 'Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1'
def input_parse(base):
    """Parse an HTML document and return a dict mapping the ``name`` of every
    ``<input>`` element to its ``value``.

    Inputs that lack either attribute are silently skipped -- the login form
    scraping below only needs the hidden fields that carry both.
    """
    input_list = {}
    htmltree = lxml.html.fromstring(base)
    for input_q in htmltree.xpath('//input'):
        try:
            input_list[input_q.attrib['name']] = input_q.attrib['value']
        except KeyError:
            # fix: was a bare "except" -- only a missing attribute key is an
            # expected, ignorable condition here.
            pass
    return input_list
# Build a bare OpenerDirector by hand so we control exactly which handlers
# participate (no proxy/auth/FTP handlers -- just HTTP(S), redirects, errors
# and the cookie processor).
opener = OpenerDirector()
h_classes = [UnknownHandler, HTTPHandler,
             HTTPDefaultErrorHandler, HTTPErrorProcessor, HTTPSHandler,
             HTTPRedirectHandler]
cj = cookielib.LWPCookieJar()
# Resume the session from a previous run if the cookie file exists.
if os.path.isfile(cook):
    cj.load(cook)
cookieprocessor = HTTPCookieProcessor(cj)
opener.add_handler(cookieprocessor)
'''opener.addheaders = [('User-agent', use_a)]'''
for klass in h_classes:
    opener.add_handler(klass())
def url_get_p(url,data):
time.sleep(2)
try:
res = opener.open(url,data)
except HTTPError, e:
print e.headers
print 'Cookies', list(cookieprocessor.cookiejar)
return res.read(), res.geturl()
def url_get(url):
time.sleep(2)
try:
res = opener.open(url)
except HTTPError, e:
print e.headers
print 'Cookies', list(cookieprocessor.cookiejar)
return res.read()
content = url_get('https://m.google.com/app/plus/')
print content
input_list = input_parse(content)
print input_list
data = urllib.urlencode(
{'continue': input_list['continue'], 'followup': input_list['followup'], 'service': input_list['service'],
'dsh': input_list['dsh'], 'btmpl':input_list['btmpl'], 'GALX':input_list['GALX'], 'timeStmp':input_list['timeStmp'],
'secTok': input_list['secTok'], 'Email':email_my, 'Passwd': pass_my, 'signIn':input_list['signIn']})
opener.add_header = ('Referer', 'https://m.google.com/app/plus/')
content, url = url_get_p('https://accounts.google.com/ServiceLoginAuth',data)
print url
opener.add_header = ('Referer', url)
htmltree = lxml.html.fromstring(content[38:])
pref=url.split('?')[0]
url = htmltree.xpath('//tr/td[2]/a[2]/@href')[0]
print pref+url
content = url_get(pref+url)
print content
# ---------------------------------------------------------------------------
# Dead experimental code: several abandoned attempts at posting/uploading a
# photo (MultipartPostHandler, poster multipart encoding, a second opener),
# disabled by wrapping them in triple-quoted strings.  At runtime these are
# just no-op string expressions.  Kept for reference only.
# ---------------------------------------------------------------------------
'''photo = lxml.html.fromstring(content).xpath('//*[@method="post"]/@action')[0]
print photo
input_list = input_parse(content)
data = urllib.urlencode(
{'ie':input_list['ie'], 'at':input_list['at'], 'newcontent':'', 'editattachedphotos':input_list['editattachedphotos']})
content, url = url_get_p(pref+photo,data)
print content, ' ', url
'''
'''opener_upload = OpenerDirector()
for klass in h_classes:
opener_upload.add_handler(klass())
cookieprocessor = HTTPCookieProcessor(cj)
opener_upload.add_handler(cookieprocessor)
opener_upload.add_handler(MultipartPostHandler.MultipartPostHandler)'''
'''opener = poster.streaminghttp.register_openers()
opener.add_handler(urllib2.HTTPCookieProcessor(cj))
opener.add_header = ('Referer', url)
'''
'''params = {'file': open("test.txt", "rb"), 'name': 'upload test'}
'''
'''opener_upload = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj), MultipartPostHandler.MultipartPostHandler)'''
'''upload_url = 'https://plus.google.com'+lxml.html.fromstring(content).xpath('//*[@method="post"]/@action')[0]
opener.add_handler(MultipartPostHandler.MultipartPostHandler())
print upload_url
input_list = input_parse(content)
opener_upload = poster.streaminghttp.register_openers()
opener_upload.add_handler(urllib2.HTTPCookieProcessor(cj))
params = {'ie':input_list['ie'], 'at':input_list['at'],'Photo': open('zipit.jpg', 'rb'), 'uploadphoto':input_list['uploadphoto']}
datagen, headers = poster.encode.multipart_encode(params)
request = urllib2.Request(upload_url, datagen, headers)
result = urllib2.urlopen(request)
content = result.read()
'''
'''pup=opener_upload.open(upload_url, data)
content = pup.read()'''
'''print content'''
# --- Post the status update ------------------------------------------------
# Target the first POST form found on the current page.
post_url = pref+lxml.html.fromstring(content).xpath('//*[@method="post"]/@action')[0]
input_list = input_parse(content)
# Re-submit the form's hidden tokens ('ie', 'at', 'post') with the status
# text 'cli'; 'post' may be unicode, hence the explicit utf-8 encode.
data = urllib.urlencode({'ie':input_list['ie'], 'at':input_list['at'], 'post':input_list['post'].encode('utf-8'), 'newcontent':'cli'})
# Leftover fragment of an earlier payload, disabled as a string expression.
'''pphid':input_list['pphid']}'''
content, url = url_get_p(post_url,data)
print content, ' ', url
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment