Skip to content

Instantly share code, notes, and snippets.

@jimbob88
Created August 20, 2022 16:26
Show Gist options
  • Save jimbob88/941d01001450c4d1d38ff8ffbca47370 to your computer and use it in GitHub Desktop.
Save jimbob88/941d01001450c4d1d38ff8ffbca47370 to your computer and use it in GitHub Desktop.
wordproject.org Audiobook Downloader
import argparse
import urllib.parse
import zipfile
from pathlib import Path
import logging
import requests
from bs4 import BeautifulSoup
from clint.textui import progress
# Configure the root logger once at import time: timestamped INFO-level
# messages on stderr, used by the download/extract progress logs below.
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
def html_from_url(url: str, user_agent: str = None) -> bytes:
    """Fetch *url* over HTTP and return the raw response body.

    Args:
        url: The page to fetch.
        user_agent: Optional User-Agent header value. Defaults to a desktop
            Chrome string, since some sites reject the default
            ``python-requests`` agent.

    Returns:
        The response body as bytes.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
    """
    if user_agent is None:
        user_agent = (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/102.0.5005.63 Safari/537.36 "
        )
    headers = {"User-Agent": user_agent}
    # BUG FIX: requests.get()'s second positional parameter is `params`
    # (query string), not `headers` -- the original sent the UA dict as
    # URL parameters and never actually set the User-Agent header.
    result = requests.get(url, headers=headers)
    # Fail loudly on HTTP errors rather than handing an error page to the parser.
    result.raise_for_status()
    return result.content
def soup_from_url(
    url: str, user_agent: str = None, library: str = None
) -> BeautifulSoup:
    """Download *url* and parse the HTML into a BeautifulSoup tree.

    Args:
        url: The page to fetch.
        user_agent: Optional User-Agent forwarded to :func:`html_from_url`.
        library: Parser backend name for BeautifulSoup; ``"lxml"`` when None.

    Returns:
        The parsed document.
    """
    parser = "lxml" if library is None else library
    page = html_from_url(url, user_agent)
    return BeautifulSoup(page, parser)
def tags_by_class(soup: BeautifulSoup, tag: str, class_: str):
    """Return all *tag* elements in *soup* whose ``class`` attribute equals *class_*."""
    wanted = {"class": class_}
    return soup.find_all(tag, attrs=wanted)
def fancy_file_download(url: str, path: str, chunk_size: int = 1024):
    """Stream *url* to the local file *path*, showing a console progress bar.

    Args:
        url: Direct download URL.
        path: Destination file path (opened in binary write mode).
        chunk_size: Bytes per streamed chunk.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status.
    """
    r = requests.get(url, stream=True)
    # Fail before creating the local file if the request itself failed.
    r.raise_for_status()
    # BUG FIX: the original did int(r.headers.get("content-length")),
    # which raises TypeError when the server omits Content-Length.
    total_length = int(r.headers.get("content-length", 0))
    with open(path, "wb") as file:
        for chunk in progress.bar(
            r.iter_content(chunk_size=chunk_size),
            # Floor division: expected_size should be an integer chunk count,
            # not the float the original's `/` produced.
            expected_size=(total_length // chunk_size) + 1,
        ):
            # iter_content can yield empty keep-alive chunks; skip them.
            if chunk:
                file.write(chunk)
                file.flush()
def download_book(book_name: str, book_url: str):
    """Download the "Zip" audio archive for one book to ``./<book_name>.zip``.

    Args:
        book_name: Human-readable book title, used as the local filename stem.
        book_url: Absolute URL of the book's page.

    Raises:
        ValueError: If the page contains no "Zip" download button.
    """
    logging.info(f"Downloading {book_name}")
    book_soup = soup_from_url(book_url)
    # NOTE: `text=` is the legacy bs4 spelling of `string=`; kept for
    # compatibility with the bs4 version this script was written against.
    zip_download = book_soup.find("a", attrs={"class": "dl-button"}, text="Zip")
    # BUG FIX: find() returns None when nothing matches; the original then
    # died with an opaque AttributeError on `.attrs`.
    if zip_download is None:
        raise ValueError(f"No 'Zip' download link found on {book_url}")
    zip_url = zip_download.attrs["href"]
    fancy_file_download(zip_url, f"./{book_name}.zip")
def fancy_extract_zip(zip_path: str, destination_folder: str):
    """Extract every member of *zip_path* into *destination_folder* with a progress bar.

    Members that fail to extract are logged and skipped so one bad entry
    does not abort the whole archive.

    Args:
        zip_path: Path to the .zip file to unpack.
        destination_folder: Directory to extract into (must already exist).
    """
    logging.info(f"Extracting {zip_path} to {destination_folder}")
    with zipfile.ZipFile(zip_path, "r") as zip_ref:
        # BUG FIX: the original called extractall() first and then extracted
        # every member *again* inside the loop, unpacking the archive twice.
        # The per-member loop alone does the work and drives the progress bar.
        for member in progress.bar(zip_ref.infolist()):
            try:
                zip_ref.extract(member, destination_folder)
            except zipfile.error as e:
                logging.warning(
                    f"Failed to extract {member} (Skipping file), Error: {e}"
                )
def get_args():
    """Parse command-line options for the downloader.

    Returns:
        argparse.Namespace with ``url`` (index page to scrape) and
        ``skip_exists`` (skip books whose zip already exists locally).
    """
    arg_parser = argparse.ArgumentParser(
        description="Arguments",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        "-u",
        "--url",
        action="store",
        required=False,
        default="https://www.wordproject.org/bibles/audio/09_german/index.htm",
        help="The homepage to download from",
    )
    arg_parser.add_argument(
        "-s",
        "--skip_exists",
        action="store_true",
        required=False,
        help="If a zip file with that name already exists, don't download it",
    )
    return arg_parser.parse_args()
def main():
    """Scrape the audiobook index page and download + extract every book.

    For each ``<a>`` inside the audio navigation lists: download the book's
    zip archive to ``./<name>.zip`` (unless ``--skip_exists`` and it is
    already present) and extract it into ``./<name>/``.
    """
    args = get_args()
    soup = soup_from_url(args.url)
    ul_tags = tags_by_class(soup, "ul", "nav nav-tabs nav-stacked list-audio")
    for ul in ul_tags:
        # BUG FIX: the original used find_all_next()/find_next(), which walk
        # the *remainder of the whole document* rather than this <ul>'s own
        # children, so every book after the first list was processed more
        # than once. find_all()/find() stay inside the current tag.
        for li in ul.find_all("li"):
            a_tag = li.find("a")
            if a_tag is None:
                continue  # decorative <li> with no link
            # (dropped the original's no-op .encode("utf8").decode("utf8"))
            book_name = a_tag.text
            book_url = urllib.parse.urljoin(args.url, a_tag.attrs["href"])
            if args.skip_exists and Path(f"./{book_name}.zip").exists():
                continue
            download_book(book_name, book_url)
            Path(f"./{book_name}").mkdir(exist_ok=True)
            fancy_extract_zip(f"./{book_name}.zip", f"./{book_name}")


if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment