Script to find your fedi mutuals' websites
# Script to find your fedi mutuals' websites.
# In the spirit of nerd fedi's desire to bring back self-owned websites, blogs and RSS feeds, this script finds all
# your fedi mutuals, and grabs the URLs from their custom fields. By default it only returns "verified" ones, on the
# assumption that these are probably personal sites rather than other randomly-linked things, but you can turn that
# off if you like. Definitely works on Mastodon, not tested on anything else, sorry.
# Ian Renton, January 2025
# Public Domain software
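# Requires the third-party 'requests' library (pip install requests); everything else is stdlib.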
import re
import requests
USERNAME='ian'
INSTANCE='mastodon.radio'
ONLY_VERIFIED=True
NEXT_LINK_REGEX=re.compile(r'<(.+?)>; rel="next"')
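# Mastodon paginates these endpoints via the HTTP Link header (RFC 8288). An illustrative
# (not real) header value, from which the regex above picks out the rel="next" URL:
#   <https://mastodon.radio/api/v1/accounts/1/followers?max_id=123>; rel="next",
#   <https://mastodon.radio/api/v1/accounts/1/followers?since_id=456>; rel="prev"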
# Get user ID
acct_lookup_response = requests.get('https://' + INSTANCE + '/api/v1/accounts/lookup?acct=' + USERNAME)
my_id = acct_lookup_response.json()['id']
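# The lookup response is JSON, shaped roughly like this (illustrative values):
#   {"id": "109353817578766598", "username": "ian", "acct": "ian", ...}
# Note that the ID is a string, which is why it can be concatenated into URLs below.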
# Get followers and following. There is a maximum limit per request so we need to go through
# pagination hassle.
followers = []
page = 1
next_followers_url = 'https://' + INSTANCE + '/api/v1/accounts/' + my_id + '/followers'
while True:
    print("Getting followers page " + str(page) + "...")
    followers_response = requests.get(next_followers_url)
    followers.extend(followers_response.json())
    # The 'link' header is absent when there is only one page, so fall back to an empty string.
    link_text = followers_response.headers.get('link', '')
    link_match = NEXT_LINK_REGEX.search(link_text)
    if link_match:
        next_followers_url = link_match.group(1)
        page += 1
    else:
        break
print("Found " + str(len(followers)) + " followers.")
following = []
page = 1
next_following_url = 'https://' + INSTANCE + '/api/v1/accounts/' + my_id + '/following'
while True:
    print("Getting following page " + str(page) + "...")
    following_response = requests.get(next_following_url)
    following.extend(following_response.json())
    link_text = following_response.headers.get('link', '')
    link_match = NEXT_LINK_REGEX.search(link_text)
    if link_match:
        next_following_url = link_match.group(1)
        page += 1
    else:
        break
print("Found " + str(len(following)) + " following.")
# Find mutuals
following_accts = {a['acct'] for a in following}
mutuals = [a for a in followers if a['acct'] in following_accts]
print("Found " + str(len(mutuals)) + " mutuals.")
# Print a list of things that look like URLs from your mutuals' fields
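# Each entry in an account's 'fields' is a dict along these (illustrative) lines, with the
# value as an HTML fragment and verified_at set only if the instance has verified the link:
#   {"name": "Website", "value": "<a href=\"https://example.com\">example.com</a>",
#    "verified_at": "2025-01-01T00:00:00.000+00:00"}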
print('\nWebsites found in your fedi mutuals:')
for acct in mutuals:
    firstForAccount = True
    for field in acct['fields']:
        match = re.search(r"(?P<url>https?://\S+)", field['value'])
        if match and (field['verified_at'] is not None or not ONLY_VERIFIED):
            url = match.group("url").rstrip('\"')
            if firstForAccount:
                # 'acct' is the bare username for accounts local to your own instance, so
                # append the domain to make it a full address.
                username = acct['acct']
                if "@" not in username:
                    username = username + '@' + INSTANCE
                print(username)
                firstForAccount = False
            print(' ' + url)
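# Example invocation and output (filename and accounts are illustrative, not real):
#   $ python3 fedi_mutuals_websites.py
#   Getting followers page 1...
#   ...
#   Websites found in your fedi mutuals:
#   alice@example.social
#    https://alice.example.com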