import numpy as np
import matplotlib.pyplot as plt

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def inverse_sigmoid(y):
    return np.log(y / (1 - y))

# Derivative of the inverse sigmoid function
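The preview ends at that comment. As a hedged sketch of what it announces: since inverse_sigmoid(y) = log(y / (1 - y)), its derivative is 1 / (y (1 - y)); the function name and plot below are illustrative, not the gist's own code.

def inverse_sigmoid_derivative(y):
    # d/dy log(y / (1 - y)) = 1 / (y * (1 - y))
    return 1 / (y * (1 - y))

# Quick visual check on the open interval (0, 1)
y = np.linspace(0.01, 0.99, 200)
plt.plot(y, inverse_sigmoid_derivative(y))
plt.xlabel("y")
plt.ylabel("inverse_sigmoid'(y)")
plt.show()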
================================================================================================================================================================
Layer (type (var_name))                                   Input Shape          Output Shape         Param #     Kernel Shape
================================================================================================================================================================
SD3Transformer2DModel (SD3Transformer2DModel)             --                   [1, 16, 128, 128]    --          --
├─PatchEmbed (pos_embed)                                  [1, 16, 128, 128]    [1, 4096, 1536]      --          --
│ └─Conv2d (proj)                                         [1, 16, 128, 128]    [1, 1536, 64, 64]    99,840      [2, 2]
├─CombinedTimestepTextProjEmbeddings (time_text_embed)    [1]                  [1, 1536]            --          --
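The column set (Input Shape / Output Shape / Param # / Kernel Shape) matches torchinfo's summary. A hedged sketch of how such a table can be generated; the checkpoint id and input shapes are assumptions read off the rows above, not the gist's actual call.

import torch
from diffusers import SD3Transformer2DModel
from torchinfo import summary

model = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", subfolder="transformer"
)
# Shapes are guesses consistent with the table: 16-channel 128x128 latents,
# text encoder states of width 4096, pooled projections of width 2048.
summary(
    model,
    input_data={
        "hidden_states": torch.randn(1, 16, 128, 128),
        "encoder_hidden_states": torch.randn(1, 154, 4096),
        "pooled_projections": torch.randn(1, 2048),
        "timestep": torch.tensor([1.0]),
    },
    col_names=("input_size", "output_size", "num_params", "kernel_size"),
    depth=2,
)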
from PIL import Image
import hpsv2
import torch

class HPSv2:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE",),
import torch
from einops import rearrange, repeat

def block_to_key(block):
    # Map a (block_type, index) tuple to a short UNet block key
    if block[0] == "input":
        return "in" + str(block[1])
    elif block[0] == "output":
        return "out" + str(block[1])
    elif block[0] == "middle":
        return "mid"
import gradio as gr
import json
import requests
import argparse
from dataclasses import dataclass

############### utils ###############
BAN_TOKENS = ["<|END_OF_TURN_TOKEN|>"]  # workaround tokens for Command R

parser = argparse.ArgumentParser()
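The preview ends as the argument parser is created. One plausible use of the ban list is scrubbing the special token from streamed completions; a minimal sketch (the helper name is an assumption, not the gist's code):

def strip_ban_tokens(text: str) -> str:
    # Remove Command R's end-of-turn marker from generated text
    for token in BAN_TOKENS:
        text = text.replace(token, "")
    return text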
from comfy.samplers import KSAMPLER
import torch
from comfy.k_diffusion.sampling import default_noise_sampler, to_d
from tqdm.auto import trange

# Trajectory Consistency Distillation (TCD) sampling loop; gamma controls the
# amount of stochasticity injected at each step.
@torch.no_grad()
def sampler_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, gamma=None):
    extra_args = {} if extra_args is None else extra_args
    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
    s_in = x.new_ones([x.shape[0]])  # per-sample sigma broadcast expected by the model wrapper
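A custom sampler function like this is plugged into ComfyUI by wrapping it in KSAMPLER (imported above), whose extra_options are forwarded to the function as keyword arguments; a minimal sketch with an illustrative gamma:

# Wrap the function so ComfyUI treats it as a sampler
tcd_sampler = KSAMPLER(sampler_tcd, extra_options={"gamma": 0.3})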
# https://github.com/huggingface/transformers/blob/838b87abe231fd70be5132088d0dee72a7bb8d62/src/transformers/models/opt/modeling_opt.py#L147
"""
model = AutoModelForCausalLM.from_pretrained("p1atdev/dart-v1-sft")
apply_hook(model)
"""
import torch
import torch.nn as nn

def forward_hooker(self):
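The usage docstring calls apply_hook, which the preview does not show. A hedged sketch following the closure pattern that forward_hooker begins; matching modules by class name is an assumption:

def apply_hook(model: nn.Module):
    # Replace the forward of every OPT attention module with the hooked closure
    for module in model.modules():
        if module.__class__.__name__ == "OPTAttention":
            module.forward = forward_hooker(module)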
# https://huggingface.co/shadowlilac/aesthetic-shadow-v2
from transformers import pipeline
import torch
from PIL import Image
from comfy.ldm.modules.attention import optimized_attention

def optimized_forward(self):
    def forward(hidden_states, head_mask=None, output_attentions=False):
        query = self.query(hidden_states)
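The inner forward is cut off after the query projection. A hedged completion, assuming the target is a transformers ViT-style self-attention module (so self.key, self.value, and self.num_attention_heads exist) and that optimized_attention takes (q, k, v, heads) with tensors of shape (batch, seq, heads * head_dim):

def optimized_forward_sketch(self):
    def forward(hidden_states, head_mask=None, output_attentions=False):
        query = self.query(hidden_states)
        key = self.key(hidden_states)
        value = self.value(hidden_states)
        # optimized_attention performs the head split/merge internally
        out = optimized_attention(query, key, value, self.num_attention_heads)
        return (out,)
    return forward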
def make_unet_conversion_map():
    unet_conversion_map_layer = []
    # unet
    # https://github.com/kohya-ss/sd-scripts/blob/2d7389185c021bc527b414563c245c5489d6328a/library/sdxl_model_util.py#L293
    for i in range(3):  # num_blocks is 3 in sdxl
        # loop over downblocks/upblocks
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
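The gist truncates mid-loop. Once the full map is built, it is typically applied by prefix replacement over a state dict; a minimal sketch, assuming the map holds (src_prefix, dst_prefix) pairs (kohya's original stores (sd, hf) tuples, so the order may need flipping):

def rename_keys(state_dict, conversion_map):
    # conversion_map: list of (src_prefix, dst_prefix) pairs; pair order is an assumption
    renamed = {}
    for key, value in state_dict.items():
        for src, dst in conversion_map:
            if key.startswith(src):
                key = dst + key[len(src):]
                break
        renamed[key] = value
    return renamed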