Skip to content

Instantly share code, notes, and snippets.

View tori29umai0123's full-sized avatar
🏠

tori29umai tori29umai0123

🏠
View GitHub Profile
Total tensors in 1: 1440
Total tensors in 2: 1680
⚠️ Keys only in second file:
transformer_blocks.0.img_mlp.net.0.proj.lora_A.default.weight [(4, 3072)] torch.bfloat16
transformer_blocks.0.img_mlp.net.0.proj.lora_B.default.weight [(12288, 4)] torch.bfloat16
transformer_blocks.0.txt_mlp.net.0.proj.lora_A.default.weight [(4, 3072)] torch.bfloat16
transformer_blocks.0.txt_mlp.net.0.proj.lora_B.default.weight [(12288, 4)] torch.bfloat16
transformer_blocks.1.img_mlp.net.0.proj.lora_A.default.weight [(4, 3072)] torch.bfloat16
transformer_blocks.1.img_mlp.net.0.proj.lora_B.default.weight [(12288, 4)] torch.bfloat16
Total tensors in 1: 1440
Total tensors in 2: 1680
⚠️ Keys only in first file:
transformer_blocks.0.attn.add_k_proj.lora_A.default.weight [(16, 3072)] torch.bfloat16
transformer_blocks.0.attn.add_k_proj.lora_B.default.weight [(3072, 16)] torch.bfloat16
transformer_blocks.0.attn.add_q_proj.lora_A.default.weight [(16, 3072)] torch.bfloat16
transformer_blocks.0.attn.add_q_proj.lora_B.default.weight [(3072, 16)] torch.bfloat16
transformer_blocks.0.attn.add_v_proj.lora_A.default.weight [(16, 3072)] torch.bfloat16
transformer_blocks.0.attn.add_v_proj.lora_B.default.weight [(3072, 16)] torch.bfloat16
import argparse
from safetensors.torch import safe_open
def load_structure(path):
    """Return a ``{key: (shape, dtype)}`` dict describing a safetensors file.

    Args:
        path: Path to a ``.safetensors`` file.

    Returns:
        dict mapping each tensor key to ``(tuple_of_dims, dtype_string)``,
        e.g. ``{"w": ((4, 3072), "torch.bfloat16")}``.
    """
    tensors = {}
    with safe_open(path, framework="pt") as f:
        for k in f.keys():
            t = f.get_tensor(k)
            tensors[k] = (tuple(t.shape), str(t.dtype))
    # BUG FIX: the original fell off the end and implicitly returned None,
    # contradicting the docstring and breaking every caller that iterates
    # the result. Return the collected structure.
    return tensors
import argparse
import logging
import torch
from safetensors import safe_open
from safetensors.torch import load_file, save_file
# Module-level logger; basicConfig here configures the root logger once so
# INFO-level messages from this module are visible when run as a script.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
QWEN_IMAGE_KEYS = [
import tempfile
import gradio as gr
import numpy as np
import torch
from PIL import Image
import trimesh
from huggingface_hub import hf_hub_download
from depth_anything_v2.dpt import DepthAnythingV2
from pygltflib import GLTF2, Node, Camera, Perspective, Scene
# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
from typing import Optional
import torch
# Feature probe: FlashAttention-3 ships its Python bindings as the optional
# `flash_attn_interface` module. Import it if present and record availability
# in a flag; later code can branch on FLASH_ATTN_3_AVAILABLE instead of
# re-trying the import. Only ModuleNotFoundError is swallowed deliberately —
# a broken (but present) install will still raise loudly here.
try:
    import flash_attn_interface
    FLASH_ATTN_3_AVAILABLE = True
except ModuleNotFoundError:
    FLASH_ATTN_3_AVAILABLE = False
import argparse
from datetime import datetime
import gc
import random
import os
import re
import time
import math
import copy
from types import ModuleType, SimpleNamespace
登場人物
勇者(男女選択可能)
数々の試練を乗り越え、魔王を討伐する勇者として認められた。
お人よし故に周囲に流されがちだが、いざという時は強い意志を見せる。
かつての幼馴染を取り戻すのが目的。
魔王(男女選択可能)
世界を破壊と混沌の闇に陥れようと目論む魔王。
勇者の幼馴染の体を奪い復活した。
import os
import sys
import numpy as np
import torch
from PIL import Image
sys.path.append(os.getcwd()) # 現在のディレクトリをシステムパスに追加
from tha3.poser.modes.load_poser import load_poser
from tha3.util import rgba_to_numpy_image, grid_change_to_numpy_image, rgb_to_numpy_image, extract_pytorch_image_from_PIL_image
import random
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from typing import Literal
# RATING、ASPECT_RATIO、LENGTH の選択肢を指定
RATING = Literal[
"<|rating:general|>",
"<|rating:sensitive|>",