international trucks
You ever wanted to download the models from International trucks? No? Well, here you go anyways.
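The script below scrapes International's online truck configurator: it pulls the public JSON data from the itconfigurator / itassets DigitalOcean Spaces buckets, then downloads each vehicle's skeleton mesh, its default part files, and every part file referenced by the series' validation rules into folders under the current working directory. It needs Python 3 and the third-party requests package; everything else is standard library.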
import os
from pathlib import Path

import requests

default_series = "hv_series"
materials_data = None
file_cache = {}
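
# Module-level state: file_cache maps a series name to the file names already
# downloaded for it, so parts shared by several vehicles or rules are fetched
# only once per series. materials_data stays None until get_materials() runs.
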
def get_data_url(series):
    return f"https://itconfigurator.nyc3.digitaloceanspaces.com/data/{series}/data.json"


def get_materials_data_url():
    return "https://itconfigurator.nyc3.digitaloceanspaces.com/data/materials_definitions.json"


def get_validation_rules_url(series):
    return f"https://itconfigurator.nyc3.digitaloceanspaces.com/data/{series}/validation_rules.json"


def get_file_url(series, file):
    return f"https://itassets.nyc3.cdn.digitaloceanspaces.com/assets/geo/{series}/{file}"


def get_material_url(material, t):
    """
    for shadows, pass "shadows" as the material
    """
    return f"https://itassets.nyc3.cdn.digitaloceanspaces.com/assets/img/material_textures/{material}/{t}.png"
def download_file(url, path):
    r = requests.get(url, allow_redirects=True)
    # fail loudly on HTTP errors instead of writing the error page to disk;
    # process_materials() relies on this to skip texture maps that don't exist
    r.raise_for_status()
    with open(path, "wb") as f:
        f.write(r.content)


def get_series_data(series):
    data_url = get_data_url(series)
    r = requests.get(data_url)
    r.raise_for_status()
    return r.json()[0]


def get_validation_rules(series):
    validation_rules_url = get_validation_rules_url(series)
    r = requests.get(validation_rules_url)
    r.raise_for_status()
    return r.json()
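
# Note: data.json is served as a JSON array and the series payload lives in
# its first element; this mirrors observed behavior of the configurator, not
# any documented API.
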
def get_available_series(data):
    """
    Gets a list of the series we can configure
    """
    features = data["features"]
    group = next((feature for feature in features if feature["group_name"] == "group_series"), None)
    if not group:
        exit("No group_series found in data.json")
    section = next((option for option in group["sections"] if option["group"] == "series"), None)
    if not section:
        exit("No series section found in data.json")
    options = section["options"]
    return [option["value"] for option in options]
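
# Rough shape get_available_series() expects from data.json, inferred from
# the field accesses above (the real payload carries many more keys):
# {
#   "features": [
#     {"group_name": "group_series",
#      "sections": [{"group": "series",
#                    "options": [{"value": "hv_series"}, ...]}]},
#     ...
#   ],
#   "vehicles": [...]
# }
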
def get_materials():
    global materials_data
    r = requests.get(get_materials_data_url())
    r.raise_for_status()
    materials_data = r.json()
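
# get_materials() only fills the materials_data global that process_materials()
# reads later; both calls are commented out in main() below, so no textures are
# fetched unless you re-enable them.
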
def process_series(series):
    print(f"Processing series {series}")
    if series not in file_cache:
        file_cache[series] = []
    series_data = get_series_data(series)
    validation_rules = get_validation_rules(series)
    vehicles = series_data["vehicles"]
    for vehicle_index, vehicle in enumerate(vehicles):
        vehicle_name = vehicle["name"].strip().replace(" ", "")
        print(f"  Processing vehicle {vehicle_name} ({vehicle_index + 1}/{len(vehicles)})")
        series_path = Path(os.getcwd(), series, vehicle_name)
        series_path.mkdir(parents=True, exist_ok=True)
        skeleton_file_name = vehicle["skeleton_filename"]
        skeleton_file_path = Path(series_path, skeleton_file_name)
        if not skeleton_file_path.exists():
            skeleton_file_url = get_file_url(series, skeleton_file_name)
            download_file(skeleton_file_url, skeleton_file_path)
        else:
            print("    Vehicle skeleton file exists, skipping")
        default_parts = vehicle["default_parts"]
        if default_parts:
            for part_index, part in enumerate(default_parts):
                part_file = part["file"].strip().replace(" ", "")
                if part_file in file_cache[series]:
                    print(f"    File {part_file} already processed, skipping")
                    continue
                print(f"    Processing default part {part_file} ({part_index + 1}/{len(default_parts)})")
                part_file_path = Path(series_path, part_file)
                if part_file_path.exists():
                    print("      Part file exists, skipping.")
                    continue
                part_file_url = get_file_url(series, part_file)
                download_file(part_file_url, part_file_path)
                file_cache[series].append(part_file)
        else:
            print("    No default parts found, skipping")
        parts_path = Path(series_path, "parts")
        parts_path.mkdir(parents=True, exist_ok=True)
        for rule_index, rule in enumerate(validation_rules):
            group_name = rule["group_name"].strip().replace(" ", "")
            if group_name == "":
                continue  # skip empty rules
            rule_path = Path(parts_path, group_name)
            print(f"    Processing validation rule {group_name} ({rule_index + 1}/{len(validation_rules)})")
            cases = rule["cases"]
            # result_bones = rule["result_bones"]
            for case_index, case in enumerate(cases):
                print(f"      Processing case {case_index + 1} of {len(cases)} for rule {group_name}")
                result_files = case["result_files"]
                if len(result_files) == 0:
                    print(f"        No result files found for rule {group_name}, probably a bone thing, skipping")
                    continue
                rule_path.mkdir(parents=True, exist_ok=True)
                case_cases = case["case"]
                for case_case_index, case_case in enumerate(case_cases):
                    print(f"        Processing sub case {case_case_index + 1} of {len(case_cases)}")
                    pick = case_case["pick"].strip().replace(" ", "")
                    equals = case_case["equals"].strip().replace(" ", "")
                    case_name = f"{pick}_{equals}_"
                    for file in result_files:
                        file_name = case_name + file.strip().replace(" ", "")
                        if file_name in file_cache[series]:
                            print(f"          File {file_name} already processed, skipping")
                            continue
                        print(f"          Processing file {file_name}")
                        file_path = Path(rule_path, file_name)
                        if file_path.exists():
                            print(f"          File {file_name} exists, skipping")
                            continue
                        file_url = get_file_url(series, file)
                        download_file(file_url, file_path)
                        file_cache[series].append(file_name)
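
# Resulting on-disk layout per series, assembled from the paths above (the
# names are placeholders; the real ones come from the JSON data):
#   <series>/
#     <VehicleName>/
#       <skeleton_filename>
#       <default part files...>
#       parts/
#         <group_name>/
#           <pick>_<equals>_<result file>
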
def process_materials():
    print("Processing Materials")
    materials_path = Path(os.getcwd(), "materials")
    materials_path.mkdir(parents=True, exist_ok=True)
    # skip entries without a folder, the fluid_dynamic folder, and materials
    # that ship no texture maps at all
    materials = [
        m for m in materials_data
        if "folder" in m and m["folder"] != "fluid_dynamic" and m.get("hasAnyMaps") is True
    ]
    for material_index, material in enumerate(materials):
        folder = material["folder"].strip().replace(" ", "")
        print(f"  Processing material {folder} ({material_index + 1}/{len(materials)})")
        material_path = Path(materials_path, folder)
        material_path.mkdir(parents=True, exist_ok=True)
        for t in ["basecolor", "metallic", "opacity", "normal", "roughness", "height"]:
            file_name = f"{t}.png"
            print(f"    Processing file {file_name}")
            file_path = Path(material_path, file_name)
            if file_path.exists():
                print(f"    File {file_name} exists for material {folder}, skipping")
                continue
            file_url = get_material_url(folder, t)
            try:
                download_file(file_url, file_path)
            except requests.HTTPError:
                # some maps may not exist
                print(f"    File {file_name} does not exist for material {folder}, skipping")
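
# Texture output layout when process_materials() is enabled:
#   materials/<folder>/{basecolor,metallic,opacity,normal,roughness,height}.png
# Maps the server doesn't have raise HTTPError and are skipped.
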
def main():
    # start by getting the default series data
    series_data = get_series_data(default_series)
    series_list = get_available_series(series_data)
    # print("Fetching Materials")
    # get_materials()
    # process_materials()
    for series in series_list:
        process_series(series)


if __name__ == "__main__":
    main()
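
To use it, run the script from an empty directory with Python 3; it downloads every series advertised in the default series' data.json. To also grab the texture maps, uncomment the get_materials() / process_materials() lines in main(), or call them yourself in that order, since process_materials() reads the global that get_materials() fills:

    get_materials()
    process_materials()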