Fast Qwen Image Edit 2509 with diffusers: the Qwen Rapid AIO transformer for 4-step inference, plus the Next Scene LoRA for cinematic image sequences with natural visual progression from frame to frame
import torch
from diffusers.models import QwenImageTransformer2DModel
from diffusers import QwenImageEditPlusPipeline
from diffusers.utils import load_image
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# load the pipeline with the Rapid AIO transformer (enables 4-step inference)
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder="transformer",
        torch_dtype=dtype,
        device_map=device,
    ),
    torch_dtype=dtype,
).to(device)
# load the Next Scene LoRA, fuse it into the transformer weights,
# then unload the adapter so inference carries no extra LoRA overhead
pipe.load_lora_weights(
    "lovis93/next-scene-qwen-image-lora-2509",
    weight_name="next-scene_lora-v2-3000.safetensors",
    adapter_name="next-scene",
)
pipe.set_adapters(["next-scene"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["next-scene"], lora_scale=1.0)
pipe.unload_lora_weights()
# input image and edit instruction
image1 = load_image("grumpycat.png")
prompt = "turn the cat into an orange cat"
inputs = {
    "image": [image1],
    "prompt": prompt,
    "generator": torch.manual_seed(42),
    "true_cfg_scale": 1.0,     # 1.0 disables true classifier-free guidance
    "negative_prompt": " ",
    "num_inference_steps": 4,  # Rapid AIO is meant for 4-step inference
    "guidance_scale": 1.0,
    "num_images_per_prompt": 1,
}
output = pipe(**inputs).images[0]
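The Next Scene LoRA is aimed at chaining edits into a short cinematic sequence. Below is a minimal sketch of that loop: each generated frame is fed back in as the next input image, and every prompt is prefixed with "Next Scene:" as a trigger phrase. The prompts, filenames, and the trigger prefix are illustrative assumptions, not part of the original gist.

# minimal sketch (assumed usage): chain edits by feeding each output frame
# back in as the next input image; prompts and filenames are illustrative
scene_prompts = [
    "Next Scene: the camera pulls back to reveal the cat sitting on a windowsill",
    "Next Scene: the cat turns toward the window as golden evening light pours in",
]

frame = output  # start from the single-edit result above
for i, scene_prompt in enumerate(scene_prompts):
    frame = pipe(
        image=[frame],
        prompt=scene_prompt,
        generator=torch.manual_seed(42),
        true_cfg_scale=1.0,
        negative_prompt=" ",
        num_inference_steps=4,
        guidance_scale=1.0,
        num_images_per_prompt=1,
    ).images[0]
    frame.save(f"scene_{i:02d}.png")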