Colab Notebook for Image Creation with Stable Diffusion
!pip install diffusers
!pip install transformers
!pip install accelerate
!pip install safetensors
!pip install xformers
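# ---
# Optional quick check: confirm the Colab runtime has a CUDA GPU before
# downloading multi-gigabyte model files.
import torch
assert torch.cuda.is_available(), "No GPU detected -- switch the Colab runtime type to GPU"
print(torch.cuda.get_device_name(0))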
# ---
token = "[token]"
!wget "[download link][?&]token={token}" -O base-model.safetensors
!wget "[download link][?&]token={token}" -O lora-x.safetensors
!wget "[download link][?&]token={token}" -O lora-y.safetensors
# ---
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, DDIMScheduler
from PIL import Image
from IPython.display import display
from safetensors.torch import load_file
import torch
import random
# ---
torch.cuda.empty_cache()
# Load the SDXL base checkpoint from a single .safetensors file in fp16.
pipe = StableDiffusionXLPipeline.from_single_file(
    "/content/base-model.safetensors",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True
).to("cuda")
# Enable xformers memory-efficient attention.
pipe.enable_xformers_memory_efficient_attention()
# Load both LoRAs under named adapters so their strengths can be mixed below.
lora_state_dict = load_file("/content/lora-x.safetensors")
pipe.load_lora_weights(lora_state_dict, adapter_name="lora_x")
lora_state_dict = load_file("/content/lora-y.safetensors")
pipe.load_lora_weights(lora_state_dict, adapter_name="lora_y")
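# Optional: confirm both adapters are registered before mixing them below.
# (get_list_adapters() is available in recent diffusers releases; the hasattr
# guard keeps this harmless on older versions.)
if hasattr(pipe, "get_list_adapters"):
    print(pipe.get_list_adapters())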
# ---
# Mix the two LoRAs: lora_x at 0.8 strength, lora_y at 0.3.
pipe.set_adapters([
    "lora_x",
    "lora_y",
], adapter_weights=[0.8, 0.3])
# Optional alternative schedulers:
#pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)  # adds more variation to outputs
#pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)  # usually higher quality
#pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)  # more deterministic outputs
batch_size = 5
prompts = []
for i in range(batch_size):
    style = random.choice(["cyberpunk", "steampunk", "impressionism"])
    prompt = f"""
    masterwork, {style}, ...
    """
    prompts.append(prompt)
negative_prompt = f"""
...
"""
images = pipe(
    prompt=prompts,
    negative_prompt=[negative_prompt] * batch_size,
    guidance_scale=5,
    num_inference_steps=50,
    generator=torch.Generator("cuda").manual_seed(876),
).images
for i, img in enumerate(images):
    print(prompts[i])
    display(img, clear=False)
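# ---
# Optional: persist the batch to disk so the images survive the Colab session
# (a minimal sketch using PIL's Image.save; the filenames are arbitrary).
for i, img in enumerate(images):
    img.save(f"/content/output_{i:02d}.png")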