Created
July 11, 2018 06:40
-
-
Save dhavalsavalia/2d4b32c1d8d6aa1255995a252dc40737 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python | |
# -*- coding: utf-8 -*- | |
""" | |
themify_downloader.py | |
Copyright (C) 2018 Dhaval Savalia <[email protected]> | |
All rights reserved. | |
""" | |
""" | |
NOTICE: | |
All content that you download through this script is owned by Themify.me (https://themify.me) | |
and you need to have active membership on their website. | |
This script is developed ONLY for educational and NON-commercial purposes only. | |
I DO NOT take any responsibility if you use this script to something illegal. | |
""" | |
""" | |
Themify.me (https://themify.me) | |
All Themify products are licensed under the GNU General Public License. | |
You may modify and use the themes on unlimited number of websites. | |
""" | |
import requests | |
from bs4 import BeautifulSoup | |
import sys | |
import getpass | |
# Themify member-area page: serves the login form and, once authenticated,
# the list of downloadable product archives.
url = 'https://themify.me/member/member'
def create_session(url, username, password):
    """
    Create an authenticated session at Themify.

    @param string url       login page URL (the aMember member page)
    @param string username  Themify account username
    @param string password  Themify account password
    @return requests.Session object holding the authentication cookie

    Raises requests.HTTPError if either the cookie-priming GET or the
    login POST returns an HTTP error status, and requests.Timeout if the
    server does not respond within 30 seconds.
    """
    ## Start session
    s = requests.Session()
    ## Make a request to url so the server sets a fresh session cookie
    ## before we attempt the login POST. Previously the response was
    ## ignored; now we fail loudly instead of continuing unauthenticated.
    resp = s.get(url, timeout=30)
    resp.raise_for_status()
    ## Send POST data payload containing username and password
    ## (field names match the aMember login form on the page).
    resp = s.post(
        url,
        data={'amember_login': username, 'amember_pass': password},
        timeout=30,
    )
    resp.raise_for_status()
    ## Return `s`, now carrying the authentication cookie
    return s
## Get username and password from the user; getpass keeps the password
## off the terminal echo. (Typo fixed: "you" -> "your".)
username = input("Please enter your themify username: ")
password = getpass.getpass("Please enter your themify password: ")

## Create the authenticated session
current_session = create_session(url, username, password)

## Fetch the member page and parse it with BeautifulSoup
page = current_session.get(url)
soup = BeautifulSoup(page.content, 'html.parser')

## Collect the href of every anchor that points at a zip archive.
## .get('href', '') instead of ['href']: anchors without an href
## attribute would otherwise raise KeyError.
all_links = [
    tag.get('href', '')
    for tag in soup.find_all('a')
    if 'zip' in tag.get('href', '')
]

## Optionally dump all the links to links.txt; the with-statement
## guarantees the file is flushed and closed even on error.
store_in_txt = input("Do you want to store all the links in txt(y/n)? ")
if store_in_txt in ('y', 'Y'):
    with open('links.txt', 'w+') as f:
        f.writelines('{}\n'.format(link) for link in all_links)

## Finally download the files! Each output file is opened in a context
## manager so the handle is closed (the original leaked every handle).
for link in all_links:
    r = current_session.get(link)
    with open(link.split('/')[-1], 'wb') as out:
        out.write(r.content)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment