Skip to content

Instantly share code, notes, and snippets.

@mrnugget
Created January 30, 2025 10:04
Show Gist options
  • Save mrnugget/e28471a9ba1402e16fb7150d6c4e1129 to your computer and use it in GitHub Desktop.
Tools generated by Claude
"""Fetch the Register Spill RSS feed and render the latest 10 posts to Markdown."""
import xml.etree.ElementTree as ET
import requests
from datetime import datetime
from email.utils import parsedate_to_datetime
from typing import Optional

# Fetch the RSS feed. Fail fast on HTTP errors and don't hang forever on a stall.
url = "https://registerspill.thorstenball.com/feed"
response = requests.get(url, timeout=30)
response.raise_for_status()
rss_content = response.content

# Parse the XML
root = ET.fromstring(rss_content)

# Extract (title, link, pub_date) from each <item>
entries = []
for item in root.findall(".//item"):
    title_elem = item.find("title")
    link_elem = item.find("link")
    pubDate_elem = item.find("pubDate")

    title: Optional[str] = title_elem.text if title_elem is not None else None
    link: Optional[str] = link_elem.text if link_elem is not None else None
    pub_date_str: Optional[str] = pubDate_elem.text if pubDate_elem is not None else None

    if title is None or link is None or pub_date_str is None:
        continue  # Skip this entry if any required field is missing

    try:
        # RSS pubDate is an RFC 2822 date ("Thu, 30 Jan 2025 10:04:00 GMT" or
        # "... +0000"). parsedate_to_datetime handles both forms, unlike
        # strptime's fragile "%Z" directive, which rejects numeric offsets.
        pub_date = parsedate_to_datetime(pub_date_str)
    except (TypeError, ValueError):
        continue  # Skip this entry if the date format is incorrect

    entries.append((title, link, pub_date))

# Sort entries by date (newest first)
entries.sort(key=lambda x: x[2], reverse=True)

# Limit to the last 10 entries
entries = entries[:10]

# Generate Markdown content
markdown_template = """---
layout: default
title: Register Spill
---
# Register Spill
Since April 2023 I've been writing a weekly newsletter on Substack called <a href="https://registerspill.thorstenball.com/">Register Spill</a>.
It has thousands of subscribers now.
Here are the last 10 posts I've published:
<table class="posts">
{entries}
</table>
You can subscribe and find the full archive at <a href="https://registerspill.thorstenball.com/">registerspill.thorstenball.com</a>.
"""
entry_template = ' <tr>\n <td>{date}</td>\n <td><a href="{link}">{title}</a></td>\n </tr>'

entries_md = "\n".join(
    entry_template.format(
        date=pub_date.strftime("%d %b %Y"),
        link=link,
        title=title
    ) for title, link, pub_date in entries
)

# Write the Markdown file
with open("register-spill.md", "w", encoding="utf-8") as f:
    f.write(markdown_template.format(entries=entries_md))
print("register-spill.md has been generated successfully.")
import os
import requests
import hashlib
from urllib.parse import quote
# Google Drive API key, read from the environment; raises KeyError if unset.
API_KEY = os.environ['GOOGLE_DRIVE_API_KEY']
# ID of the Drive folder holding the tweet screenshots.
FOLDER_ID = '1zoTiU9_lYN67cLzicHPNrpLdR2vPUrZs'
# Local directory the images are synced into.
IMAGE_DIR = 'images/funny_tweets'
# Output page written by create_markdown_page().
MARKDOWN_FILE = 'funny_tweets.md'
def get_file_list():
    """Return metadata dicts for the image files in the Drive folder.

    Queries the Google Drive v3 ``files`` endpoint for images whose parent
    is FOLDER_ID. Returns [] on any request error (best-effort).
    """
    url = "https://www.googleapis.com/drive/v3/files"
    params = {
        'key': API_KEY,
        'q': f"'{FOLDER_ID}' in parents and mimeType contains 'image/'",
        'fields': "files(id, name, md5Checksum, webContentLink)",
        # NOTE(review): no pagination — assumes the folder holds <= 1000 files.
        'pageSize': 1000
    }
    try:
        # Timeout so a network stall can't hang the sync indefinitely.
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        return response.json().get('files', [])
    except requests.exceptions.RequestException as e:
        print(f"Error fetching file list: {e}")
        return []
def download_file(file_id, file_path):
    """Download the Drive file with *file_id* to *file_path* (best-effort).

    Uses the public ``uc?export=download`` endpoint; errors are printed,
    not raised, so one failed file doesn't abort the whole sync.
    """
    # Direct download URL format
    download_url = f"https://drive.google.com/uc?export=download&id={file_id}"
    try:
        # Timeout so one stuck download can't hang the whole run.
        response = requests.get(download_url, timeout=60)
        response.raise_for_status()
        with open(file_path, 'wb') as f:
            f.write(response.content)
    except requests.exceptions.RequestException as e:
        print(f"Error downloading file {file_id}: {e}")
def get_local_files():
    """Map each filename in IMAGE_DIR to the MD5 hex digest of its contents.

    Creates IMAGE_DIR first if it does not exist yet.
    """
    os.makedirs(IMAGE_DIR, exist_ok=True)
    hashes = {}
    for entry in os.scandir(IMAGE_DIR):
        if not entry.is_file():
            continue
        with open(entry.path, 'rb') as fh:
            hashes[entry.name] = hashlib.md5(fh.read()).hexdigest()
    return hashes
def create_markdown_page(image_files):
    """Write MARKDOWN_FILE: a Jekyll page showing every image in a CSS grid.

    *image_files* is an iterable of filenames relative to IMAGE_DIR; they are
    sorted so the generated page has a stable order between runs.
    """
    header = """---
layout: default
title: Funny Tweets
---
# Funny Tweets
<style>
.tweet-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 20px;
padding: 20px 0;
}
.tweet-grid img {
width: 100%;
height: auto;
border: 1px solid #ddd;
border-radius: 8px;
}
</style>
<p class="meta">Last updated: {{ site.time | date: "%Y-%m-%d" }}</p>
Over the years I've fallen in love with [Weird
Twitter](https://en.wikipedia.org/wiki/Weird_Twitter) and the very specific
humor found in some tweets. It sounds like hyperbole, but I truly think there's
an art to these exceptionally funny tweets.
Fearing that the tweets will be deleted and disappear from the Internet, I've
started to screenshot and save them. What you see here is a collection of some
of my favorite tweets.
<div class="tweet-grid">
"""
    # One lazy-loading <img> per file; quote() escapes unsafe URL characters.
    image_content = []
    for image in sorted(image_files):
        image_content.append(f'<img src="/images/funny_tweets/{quote(image)}" alt="Screenshot of a tweet" loading="lazy">')
    footer = "</div>"
    content = header + '\n'.join(image_content) + footer
    # Explicit utf-8 so output doesn't depend on the platform's locale encoding.
    with open(MARKDOWN_FILE, 'w', encoding='utf-8') as f:
        f.write(content)
def main():
    """Sync the Drive folder's images into IMAGE_DIR and rebuild the page."""
    print(f"Using API key: {API_KEY[:5]}...")
    drive_files = get_file_list()
    print(f"Found {len(drive_files)} files in Google Drive")

    # Hashes of everything already on disk, keyed by filename.
    local_files = get_local_files()

    # Sync: download anything missing or whose contents changed.
    page_images = []
    for remote in drive_files:
        name = remote['name']
        already_synced = (
            name in local_files
            and remote.get('md5Checksum') == local_files[name]
        )
        if already_synced:
            print(f"Skipping {name} (already up to date)")
        else:
            print(f"Downloading {name}...")
            download_file(remote['id'], os.path.join(IMAGE_DIR, name))
        page_images.append(name)

    # Remove any local files that aren't in Drive anymore.
    drive_names = {f['name'] for f in drive_files}
    for stale in local_files:
        if stale not in drive_names:
            print(f"Deleting {stale} (no longer in Drive)...")
            os.remove(os.path.join(IMAGE_DIR, stale))

    # Regenerate the Markdown page from the synced file list.
    create_markdown_page(page_images)
    print("Markdown page created successfully")


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment