@jongan69
Last active August 16, 2025 01:52
I fucking hate Miami Beach
#!/usr/bin/env python3
"""
Final Working Parking Ticket Search - JSON Output with All Fields
"""
import requests
from bs4 import BeautifulSoup
import json
def search_parking_tickets(tag_number="62etid"):
url = "https://www2.miamidadeclerk.gov/payparking/parkingSearch.aspx"
session = requests.Session()
session.headers.update({
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"Accept-Language": "en-US,en;q=0.9",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"DNT": "1",
"Origin": "https://www2.miamidadeclerk.gov",
"Pragma": "no-cache",
"Referer": "https://www2.miamidadeclerk.gov/payparking/parkingSearch.aspx",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "same-origin",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Mobile Safari/537.36",
"sec-ch-ua": '"Not;A=Brand";v="99", "Google Chrome";v="139", "Chromium";v="139"',
"sec-ch-ua-mobile": "?1",
"sec-ch-ua-platform": '"Android"'
})
# Step 1: Get initial page
response = session.get(url, timeout=30)
soup = BeautifulSoup(response.text, "html.parser")
viewstate = soup.find("input", {"id": "__VIEWSTATE"})["value"]
eventvalidation = soup.find("input", {"id": "__EVENTVALIDATION"})["value"]
viewstategen = soup.find("input", {"id": "__VIEWSTATEGENERATOR"})["value"]
# Step 2: Submit the search
form_data = {
"__EVENTTARGET": "ctl00$ContentPlaceHolder1$btnSubmit_TagSearch",
"__EVENTARGUMENT": "",
"__VIEWSTATE": viewstate,
"__VIEWSTATEGENERATOR": viewstategen,
"__EVENTVALIDATION": eventvalidation,
"ctl00$ContentPlaceHolder1$hfTab": "tagplate",
"ctl00$ContentPlaceHolder1$txtcitn": "",
"ctl00$ContentPlaceHolder1$txtTag": tag_number,
"ctl00$ContentPlaceHolder1$DropDownState": "FL"
}
session.headers.update({"Content-Type": "application/x-www-form-urlencoded"})
search_response = session.post(url, data=form_data, timeout=30)
return parse_ticket_data(search_response.text)
def parse_ticket_data(html_content):
    soup = BeautifulSoup(html_content, "html.parser")
    tables = soup.find_all("table")
    for table in tables:
        rows = table.find_all("tr")
        if len(rows) > 1:
            headers = [th.get_text(strip=True) for th in rows[0].find_all(["td", "th"])]
            # Ensure it's a citation table
            if any("citation" in h.lower() for h in headers):
                tickets = []
                for row in rows[1:]:
                    cells = row.find_all("td")
                    if cells:
                        ticket = {}
                        for i, header in enumerate(headers):
                            if i < len(cells):
                                ticket[header] = cells[i].get_text(strip=True)
                        tickets.append(ticket)
                return tickets
    return []
def main():
    import sys
    if len(sys.argv) > 1:
        tag_number = sys.argv[1]
    else:
        tag_number = input("Enter tag number (default '62etid'): ").strip() or "62etid"
    tickets = search_parking_tickets(tag_number)
    # Output JSON
    print(json.dumps({"tag_number": tag_number, "tickets": tickets}, indent=2))
if __name__ == "__main__":
    main()
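# ---------------------------------------------------------------------------
# Usage sketch for the script above (illustrative only). Assuming it is saved
# as parking_search.py (filename and tag number are hypothetical), main()
# takes an optional tag number on the command line and prints a JSON object
# with "tag_number" and "tickets" keys:
#
#   $ python3 parking_search.py ABC123
#   {"tag_number": "ABC123", "tickets": [...]}
# ---------------------------------------------------------------------------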
#!/usr/bin/env python3
"""
Miami-Dade Parking Search -> Enhanced JSON with detailed citation info
- Gets basic citation information from search results
- Fetches detailed information from individual citation pages
- Includes violation details, vehicle details, and payment information
"""
import json
import re
import time
import requests
from bs4 import BeautifulSoup
BASE_URL = "https://www2.miamidadeclerk.gov/payparking/parkingSearch.aspx"
# ------------------ Utilities ------------------
def collect_form_fields(soup: BeautifulSoup) -> dict:
"""Collect *all* form fields. Keeps ASP.NET happy on postbacks."""
data = {}
for inp in soup.select('input[name]'):
name = inp["name"]
t = (inp.get("type") or "").lower()
if t in ("checkbox", "radio"):
if inp.has_attr("checked"):
data[name] = inp.get("value", "on")
else:
data[name] = inp.get("value", "")
for sel in soup.select('select[name]'):
name = sel["name"]
opt = sel.find("option", selected=True) or sel.find("option")
if opt:
data[name] = opt.get("value", opt.get_text(strip=True))
for ta in soup.select('textarea[name]'):
data[ta["name"]] = ta.get_text()
return data
def extract_hidden_fields(soup: BeautifulSoup) -> dict:
"""Grab essential ASP.NET state fields."""
def val(i):
el = soup.find("input", id=i)
return el["value"] if el and el.has_attr("value") else ""
return {
"__VIEWSTATE": val("__VIEWSTATE"),
"__EVENTVALIDATION": val("__EVENTVALIDATION"),
"__VIEWSTATEGENERATOR": val("__VIEWSTATEGENERATOR"),
}
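# Note: an ASP.NET WebForms page typically rejects a POST whose __VIEWSTATE or
# __EVENTVALIDATION value does not match what the server issued on the previous
# response, which is why every postback below re-sends the hidden fields
# scraped from the most recent page load.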
def postback(session: requests.Session, soup: BeautifulSoup, event_target: str, event_argument: str = "", max_retries: int = 3):
"""
Perform a postback with the given __EVENTTARGET, preserving all fields.
Returns the new BeautifulSoup and the raw HTML.
"""
for attempt in range(max_retries):
try:
data = collect_form_fields(soup)
data.update(extract_hidden_fields(soup))
data["__EVENTTARGET"] = event_target
data["__EVENTARGUMENT"] = event_argument
# Add a small delay between requests to be respectful
if attempt > 0:
time.sleep(2)
r = session.post(BASE_URL, data=data, timeout=60)
r.raise_for_status()
return BeautifulSoup(r.text, "html.parser"), r.text
except requests.exceptions.Timeout:
if attempt == max_retries - 1:
raise
print(f"[retry {attempt + 1}/{max_retries}] Timeout, retrying...")
except requests.exceptions.RequestException as e:
if attempt == max_retries - 1:
raise
print(f"[retry {attempt + 1}/{max_retries}] Request failed: {e}, retrying...")
raise Exception(f"Failed after {max_retries} attempts")
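# postback() is a general-purpose helper and is not called in the flow below,
# which uses fetch_citation_details() instead. A hypothetical use would be
# triggering the "more info" expand link captured by parse_main_rows(), e.g.:
#
#   soup, html = postback(session, soup, row["_expand_target"])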
# ------------------ Parsing helpers ------------------
def find_results_table(soup: BeautifulSoup):
"""
Find the citations table by checking its header text.
Looks for headers containing: Citation, Date Issued, Status, Amount Due.
"""
for table in soup.find_all("table"):
headers = [th.get_text(strip=True).lower() for th in table.find_all("th")]
if not headers:
continue
needed = {"citation", "date issued", "status", "amount due"}
if needed.issubset(set(headers)):
return table
return None
def parse_main_rows(results_table: BeautifulSoup):
"""
Parse the results table into rows with summary data and expand links.
Uses header mapping to avoid relying on column order.
"""
rows = []
trs = results_table.find_all("tr")
if len(trs) < 2:
return rows
header_cells = [th.get_text(strip=True) for th in trs[0].find_all("th")]
hmap = {h.lower(): i for i, h in enumerate(header_cells)}
idx_plus = hmap.get("more info", 0)
idx_cit = hmap.get("citation", 1)
idx_date = hmap.get("date issued", 2)
idx_status = hmap.get("status", 3)
idx_amount = hmap.get("amount due", 4)
for tr in trs[1:]:
tds = tr.find_all("td")
if len(tds) < max(idx_amount, idx_status, idx_date, idx_cit, idx_plus) + 1:
continue
# expand link in first column
expand_a = tds[idx_plus].find("a", href=True)
expand_target = None
if expand_a and "__doPostBack" in expand_a["href"]:
parts = expand_a["href"].split("'")
if len(parts) >= 2:
expand_target = parts[1]
citation_text = tds[idx_cit].get_text(strip=True)
if not citation_text:
continue
rows.append({
"Citation": citation_text,
"Date Issued": tds[idx_date].get_text(strip=True),
"Status": tds[idx_status].get_text(strip=True),
"Amount Due": tds[idx_amount].get_text(strip=True),
"_expand_target": expand_target
})
return rows
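# Each returned row is a dict keyed by the visible column names plus a private
# "_expand_target" holding the __doPostBack control ID from the "more info"
# link. Illustrative shape (all values below are made up):
#
#   {"Citation": "1234567890", "Date Issued": "01/01/2025", "Status": "OPEN",
#    "Amount Due": "$36.00", "_expand_target": "ctl00$ContentPlaceHolder1$..."}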
def parse_citation_details(soup: BeautifulSoup, citation_number: str) -> dict:
"""
Parse detailed information from a citation detail page.
"""
details = {}
# Look for specific elements by ID that contain the detailed information
id_mappings = {
# Citation information
"lb_Citation": "citation_number",
"lb_Tag": "tag_number",
"lb_State": "state",
# Date and amount information
"lb_IssueDate": "issue_date",
"lb_AmountDueNow": "amount_due_now",
"lb_DueDate": "due_date",
"lb_AmountDueAfterDueDate": "amount_due_after_due_date",
"lb_Status": "status",
# Violation information
"lb_Violation": "violation_type",
"lb_location": "location",
"lb_Municipality": "municipality",
# Vehicle details
"lb_Make": "vehicle_make",
"lb_Style": "vehicle_style",
"lb_Color": "vehicle_color",
}
for element_id, field_name in id_mappings.items():
element = soup.find("span", id=element_id)
if element:
details[field_name] = element.get_text(strip=True)
# Also look for any table-based information as a fallback
tables = soup.find_all("table", class_="table table-bordered mb-0")
for table in tables:
rows = table.find_all("tr")
for row in rows:
cells = row.find_all(["td", "th"])
if len(cells) >= 2:
key = cells[0].get_text(strip=True)
value = cells[1].get_text(strip=True)
if key and value:
# Clean up the key name
clean_key = key.lower().replace(' ', '_').replace('/', '_').replace('&', 'and')
details[f"table_{clean_key}"] = value
return details
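# The returned dict mixes the span-ID fields mapped above (e.g. "issue_date",
# "amount_due_now") with fallback entries prefixed "table_" for any two-column
# rows found in the bordered detail tables; fetch_all_citations() strips that
# prefix before merging the details into each citation record.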
# ------------------ Main flow ------------------
def fetch_citation_details(session: requests.Session, citation_number: str) -> dict:
"""
Fetch detailed information for a specific citation using the citation search.
"""
# Get the initial page to establish session
r = session.get(BASE_URL, timeout=60)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
# Build form for citation search
data = collect_form_fields(soup)
data.update(extract_hidden_fields(soup))
data["__EVENTTARGET"] = "ctl00$ContentPlaceHolder1$btnSubmit_CitSearch"
data["__EVENTARGUMENT"] = ""
data["ctl00$ContentPlaceHolder1$txtcitn"] = citation_number
# Keep expected fields if present
if "ctl00$ContentPlaceHolder1$hfTab" in data:
data["ctl00$ContentPlaceHolder1$hfTab"] = "citation"
if "ctl00$ContentPlaceHolder1$txtTag" in data:
data["ctl00$ContentPlaceHolder1$txtTag"] = ""
if "ctl00$ContentPlaceHolder1$DropDownState" in data:
data["ctl00$ContentPlaceHolder1$DropDownState"] = "FL"
# Submit the citation search
r2 = session.post(BASE_URL, data=data, timeout=60)
r2.raise_for_status()
soup = BeautifulSoup(r2.text, "html.parser")
# Parse the detailed information
return parse_citation_details(soup, citation_number)
def fetch_all_citations(tag_number: str) -> dict:
    session = requests.Session()
    session.headers.update({
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Referer": BASE_URL,
        "Origin": "https://www2.miamidadeclerk.gov",
    })
    # 1) GET initial page
    r = session.get(BASE_URL, timeout=60)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, "html.parser")
    # 2) Build form for search
    data = collect_form_fields(soup)
    data.update(extract_hidden_fields(soup))
    data["__EVENTTARGET"] = "ctl00$ContentPlaceHolder1$btnSubmit_TagSearch"
    data["__EVENTARGUMENT"] = ""
    data["ctl00$ContentPlaceHolder1$txtTag"] = tag_number.strip().upper()
    # Keep expected fields if present
    if "ctl00$ContentPlaceHolder1$DropDownState" in data and not data["ctl00$ContentPlaceHolder1$DropDownState"]:
        data["ctl00$ContentPlaceHolder1$DropDownState"] = "FL"
    if "ctl00$ContentPlaceHolder1$hfTab" in data:
        data["ctl00$ContentPlaceHolder1$hfTab"] = "tagplate"
    if "ctl00$ContentPlaceHolder1$txtcitn" in data:
        data["ctl00$ContentPlaceHolder1$txtcitn"] = ""
    # 3) POST search
    r2 = session.post(BASE_URL, data=data, timeout=60)
    r2.raise_for_status()
    soup = BeautifulSoup(r2.text, "html.parser")
    # 4) Parse main results
    table = find_results_table(soup)
    if not table:
        msg = soup.select_one("#lblErrorTag")
        return {
            "tag_number": tag_number.strip().upper(),
            "count": 0,
            "total_due": None,
            "citations": [],
            "message": msg.get_text(strip=True) if msg else "No results table found."
        }
    rows = parse_main_rows(table)
    total_due_el = soup.select_one("#lbl_totaldue_vTag")
    total_due = total_due_el.get_text(strip=True) if total_due_el else None
    # 5) Fetch detailed information for each citation using citation search
    citations = []
    print(f"Found {len(rows)} citations. Fetching detailed information...")
    for i, row in enumerate(rows):
        print(f"Processing citation {i+1}/{len(rows)}: {row['Citation']}")
        # Start with basic information
        citation_info = {
            "Citation": row["Citation"],
            "Date Issued": row["Date Issued"],
            "Status": row["Status"],
            "Amount Due": row["Amount Due"],
        }
        # Add payment context
        if row["Status"] == "OPEN":
            citation_info["needs_payment"] = True
            citation_info["payment_required"] = row["Amount Due"]
        else:
            citation_info["needs_payment"] = False
            citation_info["payment_required"] = "$0.00"
        # Try to get detailed information using citation search
        try:
            # Add delay between requests to be respectful to the server
            if i > 0:
                time.sleep(1)
            # Fetch detailed information for this citation
            details = fetch_citation_details(session, row["Citation"])
            if details:
                print(f"  ✓ Found {len(details)} detail fields")
                # Clean up the details by removing table_ prefix and organizing them
                cleaned_details = {}
                for key, value in details.items():
                    if key.startswith('table_'):
                        # Remove table_ prefix and clean up the key
                        clean_key = key[6:]  # Remove 'table_' prefix
                        cleaned_details[clean_key] = value
                    else:
                        cleaned_details[key] = value
                citation_info.update(cleaned_details)
            else:
                print(f"  ⚠ No details found")
        except Exception as e:
            print(f"  ✗ Error fetching details for citation {row['Citation']}: {e}")
        citations.append(citation_info)
    return {
        "tag_number": tag_number.strip().upper(),
        "count": len(citations),
        "total_due": total_due,
        "citations": citations
    }
# ------------------ CLI ------------------
if __name__ == "__main__":
    import sys
    tag = sys.argv[1] if len(sys.argv) > 1 else "62ETID"
    result = fetch_all_citations(tag)
    print(json.dumps(result, indent=2, ensure_ascii=False))
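# Usage sketch (illustrative): run with a plate/tag number, or omit it to use
# the default "62ETID". The filename below is hypothetical:
#
#   $ python3 miami_dade_parking_enhanced.py 62ETID
#
# Progress lines are printed to stdout, followed by the final JSON document.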