Created
October 26, 2021 16:24
-
-
Save phwelo/ca1491838f90295c502d5853b811818f to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
import requests | |
from bs4 import BeautifulSoup | |
import json | |
import sys | |
import transposer | |
import argparse | |
# transpose(source_chord, direction, to_key) | |
# ansi colors used: | |
class colors:
    """ANSI escape sequences used to colorize terminal output."""
    # bold bright-white foreground
    white = '\u001b[38;5;255;1m'
    # reset all attributes back to the terminal default
    clear = '\u001b[0m'
    # reset, then bold magenta foreground (chord names)
    magenta = '\u001b[0m\u001b[38;5;161;1m'
    # reset, then bold olive-green foreground followed by a literal '[' —
    # used as a replacement FOR '[' so the bracket itself stays visible
    greenish = '\u001b[0m\u001b[38;5;101;1m['
def find_content(tab_url):
    """Fetch an Ultimate Guitar tab page and return the raw tab text.

    The tab body lives in a JSON blob stored on the page's single
    ``js-store`` element; this extracts and returns the
    ``[tab]``/``[ch]``-annotated content string from it.

    Parameters
    ----------
    tab_url : str
        URL of the song's chord/tab page.

    Returns
    -------
    str
        The raw annotated tab content.

    Exits the process with status 1 when the page does not contain
    exactly one ``js-store`` element (wrong URL or changed page layout).
    """
    # Timeout so a dead host can't hang the script forever; fail loudly
    # on HTTP errors instead of trying to parse an error page.
    r = requests.get(tab_url, timeout=30)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, 'html.parser')
    divs = soup.find_all(attrs={"class": "js-store"})
    if len(divs) != 1:
        # Diagnostics belong on stderr, and should say what went wrong.
        print("error: expected exactly one js-store element, got %d" % len(divs),
              file=sys.stderr)
        sys.exit(1)
    tab = json.loads(divs[0]['data-content'])
    return tab["store"]["page"]["data"]["tab_view"]["wiki_tab"]["content"]
# --- command-line interface ----------------------------------------------
parser = argparse.ArgumentParser(
    description='Rip Off UltimateGuitarArchive\'s Content.')
parser.add_argument(
    '--url',
    action='store',
    required=True,
    help='The UltimateGuitarArchive URL to the song chord/tab')
# --term and --html both write into args.output_format; when neither flag
# is present it keeps the default 'unknown'.
parser.add_argument(
    '--term', dest='output_format', action='store_const',
    const='term', default='unknown')
parser.add_argument(
    '--html', dest='output_format', action='store_const',
    const='html')
parser.add_argument(
    '--generate_site', dest='generate', action='store_const',
    const=True, default=False)
args = parser.parse_args()
tab_url = args.url
def tab_tags(doc_text):
    """Swap [tab]/[/tab] markers for ANSI white / reset sequences."""
    opened = doc_text.replace('[tab]', colors.white)
    return opened.replace('[/tab]', colors.clear)
def ch_tags(doc_text):
    """Colorize chord markers: [ch] -> magenta, [/ch] -> reset + white."""
    colored = doc_text.replace('[ch]', colors.magenta)
    # after a chord, go back to the white used for surrounding tab text
    return colored.replace('[/ch]', colors.clear + colors.white)
def headers(doc_text):
    """Colorize section-header lines (those beginning with '[').

    Every line is re-emitted with a trailing newline.  Header lines have
    each '[' replaced with the greenish sequence (which re-inserts the
    bracket) and get a color reset appended.
    """
    def _render(line):
        if not line.startswith('['):
            return line + '\n'
        return line.replace('[', colors.greenish) + colors.clear + "\n"

    return ''.join(_render(line) for line in doc_text.splitlines())
def term_out(doc_text):
    """Render the tab with ANSI colors, print it, and exit successfully."""
    colored = ch_tags(doc_text)
    colored = tab_tags(colored)
    print(headers(colored))
    sys.exit(0)
def html_out(doc_text):
    """Emit the raw [tab]/[ch]-annotated markup untouched and exit 0."""
    sys.stdout.write(str(doc_text) + '\n')
    sys.exit(0)
def plain_out(doc_text):
    """Print the tab text exactly as fetched, then exit successfully."""
    sys.stdout.write(str(doc_text) + '\n')
    sys.exit(0)
def main():
    """Fetch the tab for --url and emit it in the requested format.

    Dispatches on the CLI flags: --html prints the raw markup, --term
    prints an ANSI-colorized rendering, --generate_site is a stub, and
    the default is plain text.  Every branch terminates the process, so
    this function never returns.
    """
    text_content = find_content(tab_url)
    if args.output_format == "html":
        html_out(text_content)
    elif args.output_format == "term":
        term_out(text_content)
    elif args.generate:
        # Site generation is not implemented yet; placeholder output only.
        print("generate the whole site")
        sys.exit(0)
    else:
        plain_out(text_content)


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment