from diffusers import AutoPipelineForText2Image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained(
"black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")
pipeline.load_lora_weights("sayakpaul/yarn_art_lora_flux", weight_name="pytorch_lora_weights.safetensors")
image = pipeline("a puppy in a pond, yarn art style", guidance_scale=3.5, height=768).images[0]
image.save("yarn.png")
import torch
from huggingface_hub import hf_hub_download
from diffusers import FluxTransformer2DModel, DiffusionPipeline

dtype, device = torch.bfloat16, "cuda"
ckpt_id = "black-forest-labs/FLUX.1-schnell"

with torch.device("meta"):
    config = FluxTransformer2DModel.load_config(ckpt_id, subfolder="transformer")
    model = FluxTransformer2DModel.from_config(config).to(dtype)
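A hypothetical continuation of the meta-device pattern above (the sharded-checkpoint layout and file names are assumptions, not the original script): fetch the diffusers-format shards listed in the index file and materialize the meta-initialized module with assign=True, so no second full copy of the transformer is ever allocated.

import json
from safetensors.torch import load_file

index_path = hf_hub_download(
    ckpt_id, filename="diffusion_pytorch_model.safetensors.index.json", subfolder="transformer"
)
with open(index_path) as f:
    shard_files = sorted(set(json.load(f)["weight_map"].values()))

state_dict = {}
for shard in shard_files:
    # Each shard is a safetensors file holding a slice of the transformer weights.
    state_dict.update(load_file(hf_hub_download(ckpt_id, filename=shard, subfolder="transformer")))

# assign=True swaps the meta parameters for the loaded tensors instead of copying into them.
model.load_state_dict(state_dict, assign=True)
model.to(device)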
# Originally by jiwooya1000, put together by sayakpaul.
# Documentation: https://huggingface.co/docs/diffusers/main/en/training/distributed_inference
"""
Run:

accelerate launch distributed_inference_diffusers.py --batch_size 8

# Enable memory optimizations for large models like SD3
accelerate launch distributed_inference_diffusers.py --batch_size 8 --low_mem=1
"""
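The documentation page referenced in the header describes the core pattern this script builds on: give every process its own copy of the pipeline and split the prompts across processes with Accelerate's PartialState. A minimal sketch, with a placeholder checkpoint and prompts:

import torch
from accelerate import PartialState
from diffusers import DiffusionPipeline

distributed_state = PartialState()
pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
)
pipeline.to(distributed_state.device)

# Each process renders only its slice of the prompt list.
with distributed_state.split_between_processes(["a corgi astronaut", "a lighthouse at dusk"]) as prompt:
    image = pipeline(prompt).images[0]
    image.save(f"result_{distributed_state.process_index}.png")

Launched with accelerate launch, each GPU generates a different image in parallel.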
Flux: https://blackforestlabs.ai/announcing-black-forest-labs/
- Run Flux with quantization, by AmericanPresidentJimmyCarter.
- Run Flux on a 24GB 4090 by decoupling the different stages of the pipeline.
- Running with torchao.
- Running with NF4 (see the sketch after this list).
The first resource even lets you run the pipeline in under 16GB of GPU VRAM.
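For reference, the NF4 route boils down to quantizing the Flux transformer with bitsandbytes before assembling the pipeline. A minimal sketch, assuming diffusers' bitsandbytes integration is installed (checkpoint and prompt are placeholders):

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="transformer",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
)
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)
pipeline.enable_model_cpu_offload()  # stream the remaining components on and off the GPU
image = pipeline("a puppy in a pond, yarn art style", guidance_scale=3.5).images[0]

Quantizing only the transformer removes the single largest chunk of VRAM while keeping the quality impact small.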
from diffusers import FluxPipeline, AutoencoderKL
from diffusers.image_processor import VaeImageProcessor
from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
import torch
import gc


def flush():
    gc.collect()
    torch.cuda.empty_cache()
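The imports above point to the decoupled variant: build the text-encoding stage, compute the prompt embeddings, flush, and only then bring in the transformer and VAE for denoising and decoding. The original script presumably wires the components by hand; a simpler sketch of the same idea reuses FluxPipeline and passes None for the parts not needed in each stage (checkpoint, prompt, and step count are assumptions):

ckpt_id = "black-forest-labs/FLUX.1-schnell"

# Stage 1: only the tokenizers and text encoders, to produce the prompt embeddings.
pipeline = FluxPipeline.from_pretrained(
    ckpt_id, transformer=None, vae=None, torch_dtype=torch.bfloat16
).to("cuda")
with torch.no_grad():
    prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt(
        prompt="a puppy in a pond, yarn art style", prompt_2=None, max_sequence_length=256
    )
del pipeline
flush()

# Stage 2: only the transformer and VAE, fed with the precomputed embeddings.
pipeline = FluxPipeline.from_pretrained(
    ckpt_id,
    text_encoder=None, tokenizer=None, text_encoder_2=None, tokenizer_2=None,
    torch_dtype=torch.bfloat16,
).to("cuda")
image = pipeline(
    prompt_embeds=prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    num_inference_steps=4,
    guidance_scale=0.0,
).images[0]
image.save("decoupled_flux.png")

Because the two stages never coexist in memory, peak VRAM stays close to the larger of the two rather than their sum.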
import torch
torch.set_float32_matmul_precision("high")

from diffusers import DiffusionPipeline
import time

pipeline_id = "ptx0/pixart-900m-1024-ft"
pipeline = DiffusionPipeline.from_pretrained(
    pipeline_id,
    torch_dtype=torch.bfloat16,  # dtype is an assumption
).to("cuda")
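The truncated remainder presumably compiles the heavy modules and times a generation, which is the usual torch.compile benchmarking recipe. A sketch of that recipe (compile mode, prompt, and step count are assumptions):

# Compile the two compute-heavy pieces; the first calls trigger (slow) compilation.
pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
pipeline.vae.decode = torch.compile(pipeline.vae.decode, mode="max-autotune", fullgraph=True)

prompt = "a photo of an astronaut riding a horse on mars"
for _ in range(3):  # warm-up runs so compilation cost is not measured
    _ = pipeline(prompt, num_inference_steps=30)

start = time.time()
image = pipeline(prompt, num_inference_steps=30).images[0]
print(f"Time taken: {time.time() - start:.2f} seconds")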
import torch
torch.set_float32_matmul_precision("high")

from diffusers import StableDiffusion3Pipeline
import time

id = "stabilityai/stable-diffusion-3-medium-diffusers"
pipeline = StableDiffusion3Pipeline.from_pretrained(
    id,
    torch_dtype=torch.float16,  # dtype is an assumption
).to("cuda")
from diffusers import StableDiffusion3Pipeline
from transformers import T5EncoderModel
import torch
import time
import gc


def flush():
    gc.collect()
    torch.cuda.empty_cache()
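The imports hint at treating SD3's T5-XXL encoder separately from the rest of the pipeline. The simplest documented variant is to drop it entirely, trading some prompt fidelity for roughly the memory of a 4.7B-parameter encoder. A minimal sketch (prompt and settings are placeholders):

pipeline = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    text_encoder_3=None,  # skip loading the T5-XXL encoder
    tokenizer_3=None,
    torch_dtype=torch.float16,
).to("cuda")
image = pipeline(
    "a photo of a cat holding a sign that says hello world",
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("sd3_no_t5.png")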
import torch
torch.set_float32_matmul_precision("high")

from diffusers import HunyuanDiTPipeline
import argparse
import time


def load_pipeline(args):
""" | |
Make sure you have `diffusers`, `accelerate`, `transformers`, and `bitsandbytes` installed. | |
You also set up PyTorch and CUDA. | |
Once the dependencies are installed, you can run `python run_hunyuan_dit_less_memory.py`. | |
""" | |
from diffusers import HunyuanDiTPipeline | |
from transformers import T5EncoderModel |
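The rest of the script presumably loads the large mT5 text encoder in 8-bit via bitsandbytes and hands it to the pipeline, which is what the imports point to. A sketch of that idea (the checkpoint id and generation settings are assumptions):

import torch
from transformers import BitsAndBytesConfig

ckpt_id = "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled"  # assumed checkpoint id

# Quantize only the memory-hungry second text encoder to 8-bit.
text_encoder_2 = T5EncoderModel.from_pretrained(
    ckpt_id,
    subfolder="text_encoder_2",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)

pipeline = HunyuanDiTPipeline.from_pretrained(
    ckpt_id,
    text_encoder_2=text_encoder_2,
    torch_dtype=torch.float16,
).to("cuda")  # the 8-bit encoder stays where bitsandbytes placed it

image = pipeline(prompt="一个宇航员在骑马").images[0]  # "an astronaut riding a horse"
image.save("hunyuan_dit.png")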