Goal: produce a complete, source-attributable dependency set for a GitHub repository, with clear provenance, direct-vs-transitive labeling (where feasible), and predictable fallbacks.
- Detect the ecosystem(s) from the repository's manifest files
| WITH date_spine AS ( | |
| SELECT dt | |
| FROM UNNEST(GENERATE_DATE_ARRAY( | |
| '2025-08-01', -- START DATE | |
| '2025-10-01', -- END DATE | |
| INTERVAL 1 DAY | |
| )) AS dt | |
| ), | |
| tx AS ( | |
| SELECT DISTINCT dt, 'transactions' AS table_name |
| from pyoso import Client | |
| client = Client(YOUR_OSO_API_KEY) | |
| target_collection = "your-collection-name" | |
| start_date, end_date = "2025-01-01", "2025-07-31" | |
| query = f""" | |
| WITH target_artifacts AS ( | |
| SELECT DISTINCT artifact_id |
| # python sbom.py --analyze-test-repos | |
| import requests | |
| from typing import List, Dict, Tuple | |
| import os | |
| from datetime import datetime | |
| GITHUB_TOKEN = os.getenv('GITHUB_TOKEN') | |
| TEST_REPOS = [ |
# Bootstrap an authenticated pyoso client.
#
# Reads OSO_API_KEY from the process environment (a local .env file is
# loaded first via python-dotenv). Raises KeyError if the key is absent,
# which fails fast before any query is attempted.
import os

import pandas as pd
from dotenv import load_dotenv
from pyoso import Client

load_dotenv()

OSO_API_KEY = os.environ['OSO_API_KEY']
client = Client(api_key=OSO_API_KEY)
| import requests | |
| import json | |
| from typing import Dict, Any, List | |
| from requests.exceptions import HTTPError | |
| import re | |
| class OsoClient: | |
| def __init__(self, endpoint: str = "https://www.opensource.observer/api/v1/graphql"): | |
| self.endpoint = endpoint | |
| self.headers = { |
# Bootstrap a githubkit client for the scripts below.
import csv
import json
import os
import re
from datetime import datetime, timedelta

from githubkit import GitHub
from githubkit.exception import RequestFailed

# Read the token from the environment rather than hard-coding a secret in
# source control. Falls back to "" (unauthenticated — subject to GitHub's
# low anonymous rate limit), matching the original placeholder behavior.
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN", "")
github = GitHub(GITHUB_TOKEN)
| from PIL import Image, ImageDraw, ImageFont | |
| import os | |
| def add_text_to_images(directory, output_directory, text): | |
| # Create the output directory if it doesn't exist | |
| os.makedirs(output_directory, exist_ok=True) | |
| # Loop through all PNG files in the directory | |
| for filename in os.listdir(directory): | |
| print(filename) |
# Fetch the project list from the Filecoin RetroPGF API.
import json
import requests

# tRPC search endpoint; the URL-encoded `input` payload requests up to
# 1000 projects in a single page.
url = 'https://www.fil-retropgf.io/api/trpc/projects.search?input=%7B%22json%22%3A%20%7B%22limit%22%3A%201000%7D%7D'

# `requests.get` has no default timeout and can hang indefinitely; bound it,
# and fail fast on non-2xx responses instead of surfacing an opaque
# KeyError/JSONDecodeError when indexing the body below.
response = requests.get(url, timeout=30)
response.raise_for_status()
data = response.json()

# Unwrap the tRPC envelope: result -> data -> json is the project array.
projects = data['result']['data']['json']
| for p in projects: |
| """ | |
| This script converts a properly formatted CSV file to a JSON list. | |
| The CSV must have the `Project ID` in the first column and `OP Amount` in the second column. | |
| The `Project ID` can be found at the end of the voting URL, eg: | |
| https://vote.optimism.io/retropgf/3/application/0xd730a803f5714c7f1b5e518edd57121d1b64c8c91cf611ae5f226cf9bb4b963f | |
| https://round3.optimism.io/projects/0xd730a803f5714c7f1b5e518edd57121d1b64c8c91cf611ae5f226cf9bb4b963f | |
| `Project ID` = 0xd730a803f5714c7f1b5e518edd57121d1b64c8c91cf611ae5f226cf9bb4b963f |