Last active
March 30, 2024 18:13
-
-
Save KonradIT/67c24fde28c579fad73249c11afd9f83 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from bs4 import BeautifulSoup
import requests
from pprint import pprint
# Per-site browser session state (cookies / request headers), keyed by domain.
# Left empty here — paste values captured from a logged-in browser session
# before running, if the sites require them.
browserstorage = {
    "fccid.io": {"cookies": {}, "headers": {}},
    "justia.com": {"cookies": {}, "headers": {}},
}
def fccfreqs():
    """Search fccid.io over DJI's known OcuSync v2 band and print filings
    from companies that are NOT known DJI entities (i.e. other vendors
    transmitting on the same frequencies).

    Known DJI frequencies:
    - 5ghz OcuSync v2
      lower = "5745.5"
      upper = "5829.5"
    """
    lower = "5745.5"
    upper = "5829.5"
    known_dji_entities = [
        "SZ DJI TECHNOLOGY CO., LTD",
        "SZ DJI Osmo Technology Co.,Ltd."
    ]
    site = browserstorage.get("fccid.io")
    response = requests.get(
        f"https://fccid.io/frequency.php?lower={lower}&upper={upper}&exact",
        cookies=site.get("cookies"),
        headers=site.get("headers"),
    )
    # Fail loudly on HTTP errors instead of scraping an error page.
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'lxml')
    data = []
    for row in soup.select("table tr")[1:]:  # [1:] skips the header row
        cols = row.select("td")
        # NOTE(review): the column layout (contents[2] = company name,
        # contents[5] = date) is assumed from fccid.io's current markup —
        # re-verify if the site's HTML changes.
        if cols and cols[0].contents[2].text.strip() not in known_dji_entities:
            data.append({
                "id": cols[0].find("a").text.strip(),
                "company": cols[0].contents[2].text.strip(),
                "date": cols[0].contents[5].text.strip(),
            })
    print("{:<20} {:<50} {:<15}".format("FCC ID", "Company", "Date"))
    for hit in data:
        print("{:<20} {:<50} {:<15}".format(hit.get("id"), hit.get("company"), hit.get("date")))
def trademarks():
    """Scrape justia.com for trademarks registered to the Cogito Tech
    Company Limited owner page and print each trademark name."""
    session_data = browserstorage.get("justia.com")
    response = requests.get(
        'https://trademarks.justia.com/owners/cogito-tech-company-limited-5648351/',
        cookies=session_data.get("cookies"),
        headers=session_data.get("headers")
    )
    soup = BeautifulSoup(response.text, 'lxml')
    # Trademark names are the <a> tags inside <h4 class="has-no-margin"> headings.
    names = [
        anchor.text
        for heading in soup.find_all('h4', class_='has-no-margin')
        for anchor in heading.find_all('a')
    ]
    print("Name")
    for name in names:
        print(name)
def main():
    """Run both OSINT lookups: the FCC frequency search, then the
    trademark owner search."""
    print("FCC search of known frequencies:")
    fccfreqs()
    print("Trademark search:")
    trademarks()


# Guard script entry so importing this module doesn't fire network requests.
if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment