@pashu123
Created September 24, 2022 09:53
import torch
import torchdynamo
from diffusers import AutoencoderKL
from shark.sharkdynamo.utils import make_shark_compiler

# pip install diffusers
# pip install scipy
if __name__ == "__main__":
    # Replace with your own Hugging Face access token (the original gist
    # embedded a real token here; credentials should never be committed).
    YOUR_TOKEN = "<your-huggingface-token>"

    # 1. Load the autoencoder (VAE), which decodes latents into image space.
    vae = AutoencoderKL.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        subfolder="vae",
        use_auth_token=YOUR_TOKEN,
    )

    # Wrap the VAE decoder so TorchDynamo captures its graph and hands it
    # to the SHARK compiler for CPU execution.
    @torchdynamo.optimize(
        make_shark_compiler(use_tracing=False, device="cpu", verbose=False)
    )
    def vae_dynamo(inp):
        return vae.decode(inp).sample

    # Random latents with the VAE's expected shape (batch, 4, 64, 64);
    # a 64x64 latent decodes to a 512x512 image.
    latents = torch.rand(1, 4, 64, 64)
    image = vae_dynamo(latents)
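
    # --- Added sketch, not part of the original gist: converting the decoded
    # tensor to a viewable image. This assumes the VAE output lies in roughly
    # [-1, 1], as in the standard Stable Diffusion pipeline; the filename
    # "decoded.png" is illustrative.
    from PIL import Image

    image = (image / 2 + 0.5).clamp(0, 1)  # rescale [-1, 1] -> [0, 1]
    array = (
        image[0].detach().permute(1, 2, 0).numpy() * 255
    ).astype("uint8")  # CHW float -> HWC uint8
    Image.fromarray(array).save("decoded.png")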