Dora test train: three DoRA / LyCORIS launch scripts for SDXL LoRA training with sd-scripts
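The scripts below appear to assume they are run from the root of an sd-scripts checkout (each one activates ./venv and calls sdxl_train_network.py directly); the absolute paths under /home/blackroot/ are machine-specific and will need to be adjusted.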
#!/bin/bash
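# Script 1 of 3: DoRA test (algo=lora, dora_wd=True), linear LR schedule,
# network_dim=8 / alpha=16, UNet-only training at unet_lr=8e-5, 120 epochs.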
# Config Start
# Configurations
ckpt="/home/blackroot/Desktop/SD/sd-scripts/models/model.safetensors" # base checkpoint to finetune
image_dir="/home/blackroot/Desktop/SD/sd-scripts/train_data" # folder containing subfolders named <repeats>_<conceptname>
reg_dir="" # optional; just point this to an empty folder if you don't care
output="/home/blackroot/Desktop/SD/sd-scripts/outputs" # safetensors output folder
lr_warmup_ratio=0.03
train_batch_size=10
num_epochs=120
save_every_n_epochs=5
scheduler="linear"
network_dim=8
network_alpha=16
text_encoder_lr=0
unet_lr=0.00008
output_name="output_model_name"
# Config End
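# Expected dataset layout (hypothetical folder names for illustration):
#   train_data/
#     10_conceptA/   -> every image in here counts 10 times per epoch
#     5_conceptB/    -> every image in here counts 5 times per epoch
# Each subfolder must be named <repeats>_<conceptname> with exactly one underscore;
# anything else is skipped by the loop below.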
echo "Image directory: $image_dir" | |
ls "$image_dir" | |
while IFS= read -r -d $'\0' dir; do | |
dirname=$(basename "$dir") | |
IFS='_' read -r -a parts <<< "$dirname" | |
if [ "${#parts[@]}" -ne 2 ]; then | |
echo "Directory name $dirname does not follow expected format." | |
continue | |
fi | |
repeats=${parts[0]} | |
concept=${parts[1]} | |
echo "Processing: $dirname, Repeats: $repeats, Concept: $concept" | |
imgs=$(find "$dir" -type f \( -iname "*.png" -o -iname "*.bmp" -o -iname "*.gif" -o -iname "*.jpg" -o -iname "*.jpeg" -o -iname "*.webp" \) | wc -l) | |
img_repeats=$((repeats * imgs)) | |
echo -e "\t$concept: $repeats repeats * $imgs images = $img_repeats" | |
total=$((total + img_repeats)) | |
done < <(find "$image_dir" -mindepth 1 -maxdepth 1 -type d -print0) | |
# Calculations based on total images
mts=$((total / train_batch_size * num_epochs))
# bc returns a decimal (e.g. "144.00"), so round it to an integer for --lr_warmup_steps
lr_warmup_steps=$(printf "%.0f" "$(echo "$mts * $lr_warmup_ratio" | bc)")
echo "Total images with repeats: $total"
echo "Max training steps: $total / $train_batch_size * $num_epochs = $mts"
echo "LR warmup steps: $lr_warmup_steps"
# Activate the virtual environment
source ./venv/bin/activate

# Launch training script with parameters
accelerate launch --num_cpu_threads_per_process 16 sdxl_train_network.py \
    --cache_latents \
    --enable_bucket \
    --min_bucket_reso=512 \
    --max_bucket_reso=2048 \
    --bucket_reso_steps=256 \
    --max_data_loader_n_workers=8 \
    --persistent_data_loader_workers \
    --pretrained_model_name_or_path="$ckpt" \
    --train_data_dir="$image_dir" \
    --reg_data_dir="$reg_dir" \
    --resolution=1024,1024 \
    --optimizer_type="AdamW" \
    --optimizer_args "weight_decay=0.05" "betas=0.95,0.997" \
    --output_dir="$output" \
    --train_batch_size=$train_batch_size \
    --lr_scheduler="$scheduler" \
    --lr_warmup_steps=$lr_warmup_steps \
    --max_train_steps=$mts \
    --multires_noise_discount=0.3 \
    --prior_loss_weight=1 \
    --gradient_checkpointing \
    --xformers \
    --mixed_precision=fp16 \
    --save_every_n_epochs=$save_every_n_epochs \
    --seed=1234 \
    --logging_dir="/home/blackroot/Desktop/SD/sd-scripts/logs" \
    --save_model_as=safetensors \
    --text_encoder_lr=$text_encoder_lr \
    --unet_lr=$unet_lr \
    --network_dim=$network_dim \
    --network_alpha=$network_alpha \
    --output_name=$output_name \
    --network_module=lycoris.kohya \
    --network_args "algo=lora" "dora_wd=True" "conv_dim=4" "weight_decompose=True" \
    --min_snr_gamma=5 \
    --network_train_unet_only \
    --caption_extension=.txt
#!/bin/bash
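# Script 2 of 3: same pipeline against CHEYENNE_v16 with cosine_with_restarts,
# network_dim=16 / alpha=16, unet_lr=1e-3, 80 epochs, multires noise iterations, and a small noise_offset.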
# Config Start
# Configurations
ckpt="/home/blackroot/Desktop/SD/sd-scripts/models/CHEYENNE_v16.safetensors" # base checkpoint to finetune
image_dir="/home/blackroot/Desktop/SD/sd-scripts/train_data" # folder containing subfolders named <repeats>_<conceptname>
reg_dir="" # optional; just point this to an empty folder if you don't care
output="/home/blackroot/Desktop/SD/sd-scripts/outputs" # safetensors output folder
lr_warmup_ratio=0.03
train_batch_size=10
num_epochs=80
save_every_n_epochs=5
scheduler="cosine_with_restarts"
network_dim=16
network_alpha=16
text_encoder_lr=0
unet_lr=0.001
output_name="net"
# Config End
echo "Image directory: $image_dir" | |
ls "$image_dir" | |
while IFS= read -r -d $'\0' dir; do | |
dirname=$(basename "$dir") | |
IFS='_' read -r -a parts <<< "$dirname" | |
if [ "${#parts[@]}" -ne 2 ]; then | |
echo "Directory name $dirname does not follow expected format." | |
continue | |
fi | |
repeats=${parts[0]} | |
concept=${parts[1]} | |
echo "Processing: $dirname, Repeats: $repeats, Concept: $concept" | |
imgs=$(find "$dir" -type f \( -iname "*.png" -o -iname "*.bmp" -o -iname "*.gif" -o -iname "*.jpg" -o -iname "*.jpeg" -o -iname "*.webp" \) | wc -l) | |
img_repeats=$((repeats * imgs)) | |
echo -e "\t$concept: $repeats repeats * $imgs images = $img_repeats" | |
total=$((total + img_repeats)) | |
done < <(find "$image_dir" -mindepth 1 -maxdepth 1 -type d -print0) | |
# Calculations based on total images
mts=$((total / train_batch_size * num_epochs))
# bc returns a decimal (e.g. "144.00"), so round it to an integer for --lr_warmup_steps
lr_warmup_steps=$(printf "%.0f" "$(echo "$mts * $lr_warmup_ratio" | bc)")
echo "Total images with repeats: $total"
echo "Max training steps: $total / $train_batch_size * $num_epochs = $mts"
echo "LR warmup steps: $lr_warmup_steps"
# Activate the virtual environment
source ./venv/bin/activate

# Launch training script with parameters
accelerate launch --num_cpu_threads_per_process 16 sdxl_train_network.py \
    --cache_latents \
    --enable_bucket \
    --min_bucket_reso=512 \
    --max_bucket_reso=2048 \
    --bucket_reso_steps=256 \
    --max_data_loader_n_workers=8 \
    --persistent_data_loader_workers \
    --pretrained_model_name_or_path="$ckpt" \
    --train_data_dir="$image_dir" \
    --reg_data_dir="$reg_dir" \
    --resolution=1024,1024 \
    --optimizer_type="AdamW" \
    --optimizer_args "weight_decay=0.08" "betas=0.95,0.999" \
    --output_dir="$output" \
    --train_batch_size=$train_batch_size \
    --lr_scheduler="$scheduler" \
    --lr_warmup_steps=$lr_warmup_steps \
    --max_train_steps=$mts \
    --multires_noise_discount=0.3 \
    --multires_noise_iterations=6 \
    --prior_loss_weight=1 \
    --gradient_checkpointing \
    --xformers \
    --mixed_precision=fp16 \
    --save_every_n_epochs=$save_every_n_epochs \
    --seed=1234 \
    --logging_dir="/home/blackroot/Desktop/SD/sd-scripts/logs" \
    --save_model_as=safetensors \
    --text_encoder_lr=$text_encoder_lr \
    --unet_lr=$unet_lr \
    --network_dim=$network_dim \
    --network_alpha=$network_alpha \
    --output_name=$output_name \
    --network_module=lycoris.kohya \
    --network_args "algo=lora" "dora_wd=True" "conv_dim=4" "weight_decompose=True" \
    --min_snr_gamma=5 \
    --noise_offset=0.0357 \
    --network_train_unet_only \
    --caption_extension=.txt
#!/bin/bash
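# Script 3 of 3: algo=locon with rs_lora=True, network_dim=32 / alpha=8,
# linear schedule with no warmup (lr_warmup_ratio=0), unet_lr=1e-3, 80 epochs.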
# Config Start
# Configurations
ckpt="/home/blackroot/Desktop/SD/sd-scripts/models/CHEYENNE_v16.safetensors" # base checkpoint to finetune
image_dir="/home/blackroot/Desktop/SD/sd-scripts/train_data" # folder containing subfolders named <repeats>_<conceptname>
reg_dir="" # optional; just point this to an empty folder if you don't care
output="/home/blackroot/Desktop/SD/sd-scripts/outputs" # safetensors output folder
lr_warmup_ratio=0
train_batch_size=10
num_epochs=80
save_every_n_epochs=5
scheduler="linear"
network_dim=32
network_alpha=8
text_encoder_lr=0.001
unet_lr=0.001
output_name="dora_net"
# Config End
echo "Image directory: $image_dir" | |
ls "$image_dir" | |
while IFS= read -r -d $'\0' dir; do | |
dirname=$(basename "$dir") | |
IFS='_' read -r -a parts <<< "$dirname" | |
if [ "${#parts[@]}" -ne 2 ]; then | |
echo "Directory name $dirname does not follow expected format." | |
continue | |
fi | |
repeats=${parts[0]} | |
concept=${parts[1]} | |
echo "Processing: $dirname, Repeats: $repeats, Concept: $concept" | |
imgs=$(find "$dir" -type f \( -iname "*.png" -o -iname "*.bmp" -o -iname "*.gif" -o -iname "*.jpg" -o -iname "*.jpeg" -o -iname "*.webp" \) | wc -l) | |
img_repeats=$((repeats * imgs)) | |
echo -e "\t$concept: $repeats repeats * $imgs images = $img_repeats" | |
total=$((total + img_repeats)) | |
done < <(find "$image_dir" -mindepth 1 -maxdepth 1 -type d -print0) | |
# Calculations based on total images
mts=$((total / train_batch_size * num_epochs))
# bc returns a decimal (e.g. "144.00"), so round it to an integer for --lr_warmup_steps
lr_warmup_steps=$(printf "%.0f" "$(echo "$mts * $lr_warmup_ratio" | bc)")
echo "Total images with repeats: $total"
echo "Max training steps: $total / $train_batch_size * $num_epochs = $mts"
echo "LR warmup steps: $lr_warmup_steps"
# Activate the virtual environment
source ./venv/bin/activate

#--optimizer_args "decouple=True" "weight_decay=0.1" "betas=0.9,0.999" "use_bias_correction=False" \

# Launch training script with parameters
accelerate launch --num_cpu_threads_per_process 16 sdxl_train_network.py \
    --cache_latents \
    --enable_bucket \
    --min_bucket_reso=512 \
    --max_bucket_reso=2048 \
    --bucket_reso_steps=256 \
    --max_data_loader_n_workers=8 \
    --persistent_data_loader_workers \
    --pretrained_model_name_or_path="$ckpt" \
    --train_data_dir="$image_dir" \
    --reg_data_dir="$reg_dir" \
    --resolution=1024,1024 \
    --optimizer_type="AdamW" \
    --optimizer_args "weight_decay=0.1" "betas=0.9,0.999" \
    --output_dir="$output" \
    --train_batch_size=$train_batch_size \
    --lr_scheduler="$scheduler" \
    --lr_warmup_steps=$lr_warmup_steps \
    --max_train_steps=$mts \
    --multires_noise_discount=0.3 \
    --multires_noise_iterations=6 \
    --prior_loss_weight=1 \
    --gradient_checkpointing \
    --xformers \
    --mixed_precision=fp16 \
    --save_every_n_epochs=$save_every_n_epochs \
    --seed=1234 \
    --logging_dir="/home/blackroot/Desktop/SD/sd-scripts/logs" \
    --save_model_as=safetensors \
    --text_encoder_lr=$text_encoder_lr \
    --unet_lr=$unet_lr \
    --network_dim=$network_dim \
    --network_alpha=$network_alpha \
    --output_name=$output_name \
    --network_module=lycoris.kohya \
    --network_args "algo=locon" "dora_wd=True" "conv_dim=4" "weight_decompose=True" "rs_lora=True" \
    --min_snr_gamma=5 \
    --noise_offset=0.0357 \
    --network_train_unet_only \
    --caption_extension=.txt