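"""Stable Diffusion txt2img sampling with k-diffusion's Euler-ancestral
sampler, classifier-free guidance, and HuggingFace accelerate for
multi-process generation."""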
import argparse, os, sys, glob
import torch
import torch.nn as nn
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange
from torchvision.utils import make_grid
import time
from pytorch_lightning import seed_everything
from torch import autocast
from contextlib import contextmanager, nullcontext
import accelerate
import k_diffusion as K

from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler

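# Batch an iterable into tuples of at most `size` items; used to group
# prompts read from --from-file.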
def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

def load_model_from_config(config, ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.cuda()
    model.eval()
    return model

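# Classifier-free guidance wrapper: the unconditional and conditional inputs
# are stacked into one batch so a single forward pass through the denoiser
# yields both predictions, which are then blended with `cond_scale`
# (the --scale flag below).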
class CFGDenoiser(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.inner_model = model

    def forward(self, x, sigma, uncond, cond, cond_scale):
        x_in = torch.cat([x] * 2)
        sigma_in = torch.cat([sigma] * 2)
        cond_in = torch.cat([uncond, cond])
        uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
        return uncond + (cond - uncond) * cond_scale

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render",
    )
    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
        default="out",
    )
    parser.add_argument(
        "--skip_grid",
        action='store_true',
        help="do not save a grid, only individual samples. Helpful when evaluating lots of samples",
    )
    parser.add_argument(
        "--skip_save",
        action='store_true',
        help="do not save individual samples. For speed measurements.",
    )
    parser.add_argument(
        "--ddim_steps",
        type=int,
        default=32,
        help="number of ddim sampling steps",
    )
    parser.add_argument(
        "--plms",
        action='store_true',
        help="use plms sampling",
    )
    parser.add_argument(
        "--laion400m",
        action='store_true',
        help="uses the LAION400M model",
    )
    parser.add_argument(
        "--fixed_code",
        action='store_true',
        help="if enabled, uses the same starting code across samples",
    )
    parser.add_argument(
        "--ddim_eta",
        type=float,
        default=0.0,
        help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
    )
    parser.add_argument(
        "--n_iter",
        type=int,
        default=5,
        help="sample this often",
    )
    parser.add_argument(
        "--H",
        type=int,
        default=768,
        help="image height, in pixel space",
    )
    parser.add_argument(
        "--W",
        type=int,
        default=768,
        help="image width, in pixel space",
    )
    parser.add_argument(
        "--C",
        type=int,
        default=4,
        help="latent channels",
    )
    parser.add_argument(
        "--f",
        type=int,
        default=8,
        help="downsampling factor",
    )
    parser.add_argument(
        "--n_samples",
        type=int,
        default=1,
        help="how many samples to produce for each given prompt. A.k.a. batch size",
    )
    parser.add_argument(
        "--n_rows",
        type=int,
        default=0,
        help="rows in the grid (default: n_samples)",
    )
    parser.add_argument(
        "--scale",
        type=float,
        default=7.5,
        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
    )
    parser.add_argument(
        "--from-file",
        type=str,
        help="if specified, load prompts from this file",
    )
    parser.add_argument(
        "--config",
        type=str,
        default="configs/stable-diffusion/v1-inference.yaml",
        help="path to config which constructs model",
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        default="../mech-diffusion-v-1-A.ckpt",
        help="path to checkpoint of model",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=1337,
        help="the seed (for reproducible sampling)",
    )
    parser.add_argument(
        "--precision",
        type=str,
        help="evaluate at this precision",
        choices=["full", "autocast"],
        default="autocast",
    )
    opt = parser.parse_args()

    accelerator = accelerate.Accelerator()
    device = accelerator.device

    if opt.laion400m:
        print("Falling back to LAION 400M model...")
        opt.config = "configs/latent-diffusion/txt2img-1p4B-eval.yaml"
        opt.ckpt = "models/ldm/text2img-large/model.ckpt"
        opt.outdir = "outputs/txt2img-samples-laion400m"

    config = OmegaConf.load(f"{opt.config}")
    model = load_model_from_config(config, f"{opt.ckpt}")

    if opt.plms:
        sampler = PLMSSampler(model)
    else:
        sampler = DDIMSampler(model)
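    # Wrap the LDM model in k-diffusion's CompVis interface so its samplers
    # (sample_euler_ancestral below) can drive it; the wrapper also exposes
    # the model's usable sigma (noise) range.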
    model_wrap = K.external.CompVisDenoiser(model)
    sigma_min, sigma_max = model_wrap.sigmas[0].item(), model_wrap.sigmas[-1].item()

    os.makedirs(opt.outdir, exist_ok=True)
    outpath = opt.outdir

    batch_size = opt.n_samples
    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
    if not opt.from_file:
        prompt = opt.prompt
        assert prompt is not None
        data = [batch_size * [prompt]]
    else:
        print(f"reading prompts from {opt.from_file}")
        with open(opt.from_file, "r") as f:
            data = f.read().splitlines()
            data = list(chunk(data, batch_size))

    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1

    start_code = None
    if opt.fixed_code:
        start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device)

    precision_scope = autocast if opt.precision == "autocast" else nullcontext
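    # Sample with gradients disabled, optionally under CUDA autocast, and with
    # the model's EMA weights swapped in.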
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                tic = time.time()
                all_samples = list()
                for n in trange(opt.n_iter, desc="Sampling", disable=not accelerator.is_main_process):
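                    # Seed globally from --seed, then draw one seed per
                    # accelerate process so multi-GPU runs can diverge (note
                    # that the re-seed before the noise draw below overrides
                    # this for the initial latents).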
                    seed_everything(opt.seed + n)
                    seed = opt.seed + n
                    seeds = torch.randint(-2 ** 63, 2 ** 63 - 1, [accelerator.num_processes])
                    torch.manual_seed(seeds[accelerator.process_index].item())

                    for prompts in tqdm(data, desc="data", disable=not accelerator.is_main_process):
                        uc = None
                        if opt.scale != 1.0:
                            uc = model.get_learned_conditioning(batch_size * [""])
                        if isinstance(prompts, tuple):
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)
                        shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
                        sigmas = model_wrap.get_sigmas(opt.ddim_steps)
                        torch.manual_seed(opt.seed + n)  # changes manual seeding procedure
                        x = torch.randn([opt.n_samples, *shape], device=device) * sigmas[0]  # for GPU draw
                        model_wrap_cfg = CFGDenoiser(model_wrap)
                        extra_args = {'cond': c, 'uncond': uc, 'cond_scale': opt.scale}
                        samples_ddim = K.sampling.sample_euler_ancestral(model_wrap_cfg, x, sigmas, extra_args=extra_args, disable=not accelerator.is_main_process)
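                        # Decode the latents back to pixel space, map from
                        # [-1, 1] to [0, 1], and gather results from all
                        # processes onto the main one.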
                        x_samples_ddim = model.decode_first_stage(samples_ddim)
                        x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
                        x_samples_ddim = accelerator.gather(x_samples_ddim)

                        if accelerator.is_main_process and not opt.skip_save:
                            for x_sample in x_samples_ddim:
                                x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                Image.fromarray(x_sample.astype(np.uint8)).save(
                                    os.path.join(sample_path, f"{seed}_{base_count:05}.png"))
                                base_count += 1

                        if accelerator.is_main_process and not opt.skip_grid:
                            all_samples.append(x_samples_ddim)

                if accelerator.is_main_process and not opt.skip_grid:
                    # additionally, save as grid
                    grid = torch.stack(all_samples, 0)
                    grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                    grid = make_grid(grid, nrow=n_rows)

                    # to image
                    grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                    Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                    grid_count += 1

                toc = time.time()

    print(f"Your samples are ready and waiting for you here: \n{outpath} \n"
          f" \nEnjoy.")


if __name__ == "__main__":
    main()