# Gist fragment: imports from a Depth Anything V2 depth-to-3D (glTF) Gradio viewer
import tempfile
import gradio as gr
import numpy as np
import torch
from PIL import Image
import trimesh
from huggingface_hub import hf_hub_download
from depth_anything_v2.dpt import DepthAnythingV2
from pygltflib import GLTF2, Node, Camera, Perspective, Scene
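The imports above suggest a pipeline that predicts a depth map from a single image and lifts it into a mesh. A minimal sketch of how they plausibly fit together, assuming the public Depth-Anything-V2-Large checkpoint on the Hub and a simple height-field triangulation (both are assumptions, not taken from the gist):

# Sketch only: the checkpoint repo/filename and the meshing step are assumptions.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

ckpt = hf_hub_download(
    repo_id="depth-anything/Depth-Anything-V2-Large",  # assumed checkpoint repo
    filename="depth_anything_v2_vitl.pth",
)
model = DepthAnythingV2(encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024])
model.load_state_dict(torch.load(ckpt, map_location="cpu"))
model = model.to(DEVICE).eval()

def image_to_mesh(img: Image.Image) -> trimesh.Trimesh:
    # infer_image() expects a BGR numpy array (OpenCV convention)
    depth = model.infer_image(np.array(img.convert("RGB"))[:, :, ::-1])
    h, w = depth.shape
    ys, xs = np.mgrid[0:h, 0:w]
    vertices = np.stack([xs.ravel(), -ys.ravel(), depth.ravel()], axis=1)
    # two triangles per grid cell of the depth map
    tl = (ys[:-1, :-1] * w + xs[:-1, :-1]).ravel()  # top-left vertex index per cell
    faces = np.concatenate([
        np.stack([tl, tl + w, tl + 1], axis=1),
        np.stack([tl + 1, tl + w, tl + w + 1], axis=1),
    ])
    return trimesh.Trimesh(vertices=vertices, faces=faces)

From there the gist presumably wires this into a Gradio interface and exports the mesh to a temporary file (tempfile plus trimesh's export) that pygltflib can post-process, e.g. to attach a Camera node.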
# Gist fragment: Qwen-Image LoRA checkpoint conversion utility
import argparse
import logging

import torch
from safetensors import safe_open
from safetensors.torch import load_file, save_file

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

QWEN_IMAGE_KEYS = [
    # (key list truncated in the original gist)
]
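For context, conversion scripts of this shape typically use a key list like QWEN_IMAGE_KEYS to select or remap tensors before writing a new checkpoint. A hedged sketch of that pattern (the filter logic here is an assumption; the real mapping lives in the truncated list above):

def convert(src_path: str, dst_path: str, prefixes: list[str]) -> None:
    # Keep only tensors whose names start with a known prefix (assumed logic)
    state = load_file(src_path)
    kept = {k: v for k, v in state.items() if any(k.startswith(p) for p in prefixes)}
    logger.info("kept %d of %d tensors", len(kept), len(state))
    save_file(kept, dst_path)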
# Gist fragment: compare the tensor layout of two safetensors files
import argparse

from safetensors import safe_open  # safe_open lives in the top-level safetensors package


def load_structure(path):
    """Return a {key: (shape, dtype)} dict describing a safetensors file."""
    tensors = {}
    with safe_open(path, framework="pt") as f:
        for k in f.keys():
            t = f.get_tensor(k)  # only shape/dtype are recorded; values are discarded
            tensors[k] = (tuple(t.shape), str(t.dtype))
    return tensors
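The report below reads like the output of a comparison built on load_structure(). A plausible reconstruction of that step (an assumption; the gist's exact printing code is not shown):

def compare(path1, path2):
    s1, s2 = load_structure(path1), load_structure(path2)
    print(f"Total tensors in 1: {len(s1)}")
    print(f"Total tensors in 2: {len(s2)}")
    for label, src, only in (
        ("first", s1, sorted(set(s1) - set(s2))),
        ("second", s2, sorted(set(s2) - set(s1))),
    ):
        if only:
            print(f"⚠️ Keys only in {label} file:")
            for k in only:
                shape, dtype = src[k]
                print(f"{k} [{shape}] {dtype}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("file1")
    parser.add_argument("file2")
    args = parser.parse_args()
    compare(args.file1, args.file2)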
Total tensors in 1: 1440
Total tensors in 2: 1680
⚠️ Keys only in first file:
transformer_blocks.0.attn.add_k_proj.lora_A.default.weight [(16, 3072)] torch.bfloat16
transformer_blocks.0.attn.add_k_proj.lora_B.default.weight [(3072, 16)] torch.bfloat16
transformer_blocks.0.attn.add_q_proj.lora_A.default.weight [(16, 3072)] torch.bfloat16
transformer_blocks.0.attn.add_q_proj.lora_B.default.weight [(3072, 16)] torch.bfloat16
transformer_blocks.0.attn.add_v_proj.lora_A.default.weight [(16, 3072)] torch.bfloat16
transformer_blocks.0.attn.add_v_proj.lora_B.default.weight [(3072, 16)] torch.bfloat16
⚠️ Keys only in second file:
transformer_blocks.0.img_mlp.net.0.proj.lora_A.default.weight [(4, 3072)] torch.bfloat16
transformer_blocks.0.img_mlp.net.0.proj.lora_B.default.weight [(12288, 4)] torch.bfloat16
transformer_blocks.0.txt_mlp.net.0.proj.lora_A.default.weight [(4, 3072)] torch.bfloat16
transformer_blocks.0.txt_mlp.net.0.proj.lora_B.default.weight [(12288, 4)] torch.bfloat16
transformer_blocks.1.img_mlp.net.0.proj.lora_A.default.weight [(4, 3072)] torch.bfloat16
transformer_blocks.1.img_mlp.net.0.proj.lora_B.default.weight [(12288, 4)] torch.bfloat16
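Read together, the two sections show the files adapt different modules: the first LoRA carries rank-16 adapters on the attention's add_q/k/v_proj layers (lora_A of shape (16, 3072), lora_B of (3072, 16)), while the second instead carries rank-4 adapters on the img_mlp/txt_mlp input projections (lora_A of (4, 3072), lora_B of (12288, 4), where 12288 = 4 × 3072 is the MLP expansion). In other words, the two checkpoints were trained with different target-module and rank settings, so a key-by-key comparison flags all of these entries as unmatched.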