Convert recipes from vollcorner.de to Nextcloud Cookbook format
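Besides the standard library, the script below relies on a few third-party packages: requests, BeautifulSoup (the beautifulsoup4 package) and PIL (the Pillow package). Assuming a standard Python 3 setup, they can be installed with:

pip install requests beautifulsoup4 Pillow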
import argparse
import re
import json
import os
import shutil
import requests
import datetime
from urllib.parse import urljoin
from urllib.request import urlopen
from bs4 import BeautifulSoup
from PIL import Image


def parse_duration(string: str):
    """
    Converts a string to an ISO 8601 duration.

    >>> parse_duration('ca. 120 min')
    'PT2H0M'
    >>> parse_duration('45 Minuten')
    'PT0H45M'
    >>> parse_duration('ca 60 Minuten')
    'PT1H0M'
    >>> parse_duration('90 Minuten (inkl. Ruhezeit)')
    'PT1H30M'
    >>> parse_duration('etwa 30 Min.')
    'PT0H30M'
    >>> parse_duration('1 h')
    'PT1H0M'
    """
    match = re.match(r'(?:ca\.?|etwa)?\s*(\d+)\s*(?:minuten?|min)', string.lower().strip())
    if match:
        minutes = int(match[1])
        return f'PT{minutes//60}H{minutes%60}M'
    match = re.match(r'(?:ca\.?|etwa)?\s*(\d+)\s*(?:h)', string.lower().strip())
    if match:
        hours = int(match[1])
        return f'PT{hours}H0M'
    raise RuntimeError(f'Could not parse recipe duration: {string}')


def recipe_time(soup):
    """Return the recipe's total time as an ISO 8601 duration."""
    parent = soup.find('div', class_='rezept-time')
    value = parent.find('span', class_='value')
    return parse_duration(value.text)


def recipe_yields(soup):
    """Return the number of servings as a string."""
    parent = soup.find('div', class_='rezept-persons')
    value = parent.find('span', class_='value')
    return re.search(r'\d+', value.text).group()


def recipe_ingredients(soup):
    """Return the list of ingredients, each as 'amount name'."""
    table = soup.find('table', class_='ingredients-table')
    rows = table.find_all('tr')
    ingredients = []
    for row in rows:
        amount = row.find('th').text.strip()
        name = row.find('td').text.strip()
        # A row might also be a heading for a section of ingredients (e.g. 'Für den Teig'),
        # in which case name is empty
        if name:
            ingredients.append((amount + " " + name).strip())
    return ingredients


def recipe_instructions(soup):
    """Return the preparation steps without their leading step numbers."""
    ordered_list = soup.find('ol', class_='preparation-list')
    items = ordered_list.find_all('li')
    instructions = []
    for item in items:
        instruction = re.sub(r'^\d+\.', '', item.text, count=1)
        instructions.append(instruction.strip())
    return instructions


def recipe_image(url, soup):
    """Return the absolute URL of the recipe image."""
    path = soup.find('div', class_='rezept-image').find('img')['src']
    return urljoin(url, path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Convert recipes from vollcorner.de to Nextcloud Cookbook format.
        Each URL results in a new folder inside the given output folder (or the working directory if not specified).
        These folders can be copied into the Recipes folder of Nextcloud and will be available in the
        Cookbook without further interaction.
        Example:
        nc_cookbook_import.py -c 'Main dish' -k vollcorner vegetarian -- https://www.vollcorner.de/service/rezepte/spinatknoedel/
        '''
    )
    parser.add_argument(
        'urls', metavar='url', nargs='+', type=str,
        help='Recipe URL'
    )
    parser.add_argument(
        '--output', '-o', default='',
        help='Output folder, e.g. the "Recipes" folder in Nextcloud'
    )
    parser.add_argument(
        '--category', '-c', nargs='?', default='Main dish',
        help='The category of the recipe (default: Main dish)'
    )
    parser.add_argument(
        '--keywords', '-k', nargs='*', default='',
        help='Optional list of keywords for the recipe'
    )
    args = parser.parse_args()

    for url in args.urls:
        print('Parsing', url)
        try:
            response = requests.get(url)
            response.raise_for_status()
        except requests.RequestException as e:
            print(f'Could not download recipe from {url}: {e}')
            continue

        soup = BeautifulSoup(response.text, 'html.parser')
        name = soup.find(id="rezeptOuter").find("h1").text
        image_url = recipe_image(url, soup)

        # schema.org/Recipe structure as read by Nextcloud Cookbook
        recipe = {
            "name": name,
            "url": url,
            "image": image_url,
            "totalTime": recipe_time(soup),
            "recipeCategory": args.category,
            "keywords": ','.join(args.keywords),
            "recipeYield": recipe_yields(soup),
            "recipeIngredient": recipe_ingredients(soup),
            "recipeInstructions": recipe_instructions(soup),
            "@context": "http://schema.org",
            "@type": "Recipe",
        }

        # Each recipe gets its own folder, named after the recipe
        folder = os.path.join(args.output, name)
        os.makedirs(folder, exist_ok=True)
        with open(os.path.join(folder, 'recipe.json'), 'w') as f:
            f.write(json.dumps(recipe))

        image = Image.open(urlopen(image_url))
        if image.mode != 'RGB':
            # JPEG cannot store an alpha channel, so convert e.g. RGBA or palette images first
            image = image.convert('RGB')
        image.save(os.path.join(folder, 'full.jpg'))
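
For a quick sanity check after a run, a small hypothetical snippet like the one below (the folder name is an assumption; the script names each folder after the recipe's page title) confirms that the written recipe.json parses and carries the schema.org fields that Nextcloud Cookbook reads:

import json
import os

folder = 'Spinatknoedel'  # hypothetical: replace with a folder created by the script
with open(os.path.join(folder, 'recipe.json')) as f:
    recipe = json.load(f)

# These keys are exactly the ones written by the script above.
assert recipe['@type'] == 'Recipe'
assert recipe['@context'] == 'http://schema.org'
print(recipe['name'], recipe['totalTime'], len(recipe['recipeIngredient']), 'ingredients')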