Created
May 4, 2012 12:39
-
-
Save sash13/2594580 to your computer and use it in GitHub Desktop.
A script that downloads manga chapter images from manga sites (adultmanga.ru).
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os
import re
import urllib
import urllib2
# Spoof an old IE browser string.  NOTE(review): presumably the site
# rejects the default urllib2 user agent -- confirm it filters on User-Agent.
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
# Default headers attached to every image request built in getImages().
headers = { 'User-Agent' : user_agent }
def getImages(urls, dirName):
    """Download every URL in *urls* into the directory *dirName*.

    The local file name is the last path segment of each URL.  Files that
    already exist locally are skipped.  When a download fails, the URL is
    recorded (best-effort) in ``dirName/error/<name>`` and the crawl
    continues with the next URL.

    urls    -- iterable of absolute image URLs
    dirName -- target directory; it and its ``error/`` subdir are created
               on first use
    """
    try:
        os.makedirs(dirName)
        os.makedirs(os.path.join(dirName, 'error'))
    except OSError:
        # Directories already exist from a previous run -- fine.
        pass
    for url in urls:
        name = url.split("/")[-1]
        target = os.path.join(dirName, name)
        if os.path.isfile(target):
            # Already fetched on an earlier run; skip the request entirely.
            print(url + ' Already downloaded')
            continue
        req = urllib2.Request(url, None, headers)
        try:
            response = urllib2.urlopen(req)
            try:
                data = response.read()
            finally:
                response.close()
            out = open(target, 'wb')
            try:
                out.write(data)
            finally:
                out.close()
            print(url + ' Completed')
        except urllib2.URLError:
            # Best-effort failure log: remember which URL failed so it can
            # be retried by hand; never let one bad URL abort the batch.
            err = open(os.path.join(dirName, 'error', name), 'wb')
            try:
                err.write(url)
            finally:
                err.close()
def getLinks(content):
    """Extract image URLs from the page's JavaScript ``var pictures`` array.

    content -- full HTML source of a chapter page

    Returns the list of ``http...`` URL strings found inside the array
    literal, in page order.  Each URL is echoed to stdout as a progress
    trace.  Returns an empty list when the array is empty or absent.
    """
    start = content.find('var pictures')
    # Slice out the array literal: skip past 'var pictures = ' (15 chars
    # beyond the marker) up to and including the closing ']'.
    images = content[start + 15 : start + content[start:].find(']') + 1]
    # Each URL runs from 'http' to the next double quote.
    links = re.findall(r'http[^"]*', images)
    for link in links:
        print(link)
    return links
def getUrlsPage(url, stop_slug='i_am_a_hero'):
    """Crawl chapter pages starting at *url*, downloading every image.

    Each page is fetched, its images downloaded via getLinks()/getImages(),
    and the crawl follows the page's ``var nextChapterLink`` value.  The
    crawl stops after downloading the page whose next-link's last path
    segment equals *stop_slug* (i.e. the chain has wrapped back to the
    series index page).

    url       -- absolute URL of the first chapter page to fetch
    stop_slug -- last path segment of nextChapterLink that ends the crawl
                 (defaults to the original hard-coded series slug)
    """
    while True:
        page = urllib.urlopen(url).read()
        # 'var nextChapterLink' is 19 chars; +23 presumably also skips
        # the ' = "' that follows it -- TODO confirm against page source.
        link_start = page.find('var nextChapterLink') + 23
        # The link text runs up to the first '?' (query string dropped).
        next_link = page[link_start : page[link_start:].find('?') + link_start]
        _download_page(page, url)
        if next_link.split('/')[-1] == stop_slug:
            break
        url = 'http://adultmanga.ru' + next_link


def _download_page(page, url):
    """Download all images referenced by *page* into a dir derived from *url*."""
    links = getLinks(page)
    # Directory name is the URL path with '/' replaced by '-';
    # url[21:] strips the 'http://adultmanga.ru/' prefix.
    dirName = '-'.join(url[21:].split('/'))
    getImages(links, dirName)
if __name__ == '__main__':
    # Start the crawl at a specific chapter; getUrlsPage follows the
    # next-chapter links from here until the series wraps around.
    startLink = 'http://adultmanga.ru/i_am_a_hero/vol5/50'
    getUrlsPage(startLink)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment