@renexu
renexu / train.py
Created December 28, 2017 05:46
Keras HDF5Matrix and fit_generator for huge hdf5 dataset
import threading
from keras.applications.inception_v3 import InceptionV3
from keras.optimizers import Adam
from keras.utils.io_utils import HDF5Matrix
class threadsafe_iter:
    """Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
    """
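    # The preview ends inside the class; below is a minimal sketch of how it
    # is typically completed (a lock around next()), plus an illustrative
    # HDF5Matrix batch generator -- the dataset names 'x'/'y', the batch_size
    # and the fit_generator usage line are assumptions, not the original gist.
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.it)


def threadsafe_generator(f):
    """Decorator that wraps a generator function so Keras workers can share it."""
    def g(*args, **kwargs):
        return threadsafe_iter(f(*args, **kwargs))
    return g


@threadsafe_generator
def hdf5_batch_generator(path, batch_size=32):
    x = HDF5Matrix(path, 'x')  # data stays on disk; slices are read lazily
    y = HDF5Matrix(path, 'y')
    while True:
        for i in range(0, len(x), batch_size):
            yield x[i:i + batch_size], y[i:i + batch_size]

# usage sketch: model.fit_generator(hdf5_batch_generator('train.h5'), steps_per_epoch=..., workers=4)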
@pommedeterresautee
pommedeterresautee / Makefile
Last active August 19, 2024 13:27
Divide Hugging Face Transformers training times by 2 or more with dynamic padding and uniform length batching
# bash is required for the multi-line ( \ ... \ ) shell commands below
SHELL:=/bin/bash
VIRT_ENV_FOLDER = ~/.local/share/virtualenvs/xnli
SOURCE_VIRT_ENV = source $(VIRT_ENV_FOLDER)/bin/activate
.PHONY: train
train:
	( \
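The Makefile preview stops at the start of the train recipe. The dynamic-padding trick itself lives in the training code rather than in this Makefile; a minimal sketch of the two ideas named in the title, assuming a Hugging Face tokenizer and a plain list of texts (the checkpoint name and batch size are illustrative):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint

def collate_dynamic_padding(batch_texts):
    # Dynamic padding: pad only to the longest sequence in *this* batch,
    # instead of one fixed max_length for the whole dataset.
    return tokenizer(batch_texts, padding="longest", truncation=True, return_tensors="pt")

def uniform_length_batches(texts, batch_size=32):
    # Uniform length batching: sort by token count first, so each batch
    # contains similarly sized sequences and wastes little padding.
    order = sorted(range(len(texts)), key=lambda i: len(tokenizer.tokenize(texts[i])))
    for start in range(0, len(order), batch_size):
        yield [texts[i] for i in order[start:start + batch_size]]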
@l4rz
l4rz / gist:7040835c3f8266d8b8ea3615a0b49494
Created February 25, 2021 18:35
ALEPH by @advadnoun but for local execution
#
# ALEPH by Advadnoun, https://colab.research.google.com/drive/1Q-TbYvASMPRMXCOQjkxxf72CXYjR_8Vp
# "This is a notebook that uses DALL-E's decoder and CLIP to generate images from text. I will very likely make this better & easier to use in the future."
#
# rearranged to run locally on a faster GPU
#
# directions:
# clone https://github.com/openai/DALL-E/ and https://github.com/openai/CLIP
# copy relevant files into one dir with this script
# install torch==1.7.1 and other stuff
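The preview shows only the header comments. As orientation, a heavily simplified sketch of the kind of loop the notebook runs: optimize a grid of DALL-E token logits so the decoded image matches a CLIP text embedding. It assumes the two cloned repos (openai/DALL-E, openai/CLIP) are importable; the prompt, grid size, learning rate and step count are illustrative, not the gist's values.

import torch
import torch.nn.functional as F
import clip                                  # from openai/CLIP
from dall_e import load_model, unmap_pixels  # from openai/DALL-E

device = "cuda"
perceptor, _ = clip.load("ViT-B/32", device=device, jit=False)
decoder = load_model("decoder.pkl", device)  # the downloaded DALL-E decoder weights

text = clip.tokenize(["a painting of a lighthouse"]).to(device)
with torch.no_grad():
    text_emb = F.normalize(perceptor.encode_text(text).float(), dim=-1)

# learnable logits over the 8192-entry DALL-E vocabulary on a 32x32 token grid
logits = torch.randn(1, 8192, 32, 32, device=device, requires_grad=True)
opt = torch.optim.Adam([logits], lr=0.05)

clip_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073], device=device).view(1, 3, 1, 1)
clip_std = torch.tensor([0.26862954, 0.26130258, 0.27577711], device=device).view(1, 3, 1, 1)

for step in range(200):
    z = F.softmax(logits, dim=1)                 # soft token assignments
    x = decoder(z).float()
    img = unmap_pixels(torch.sigmoid(x[:, :3]))  # [1, 3, 256, 256] in [0, 1]
    img224 = F.interpolate(img, size=224, mode="bilinear", align_corners=False)
    img_emb = F.normalize(perceptor.encode_image((img224 - clip_mean) / clip_std).float(), dim=-1)
    loss = -(img_emb * text_emb).sum()           # maximize CLIP similarity
    opt.zero_grad()
    loss.backward()
    opt.step()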
@karpathy
karpathy / stablediffusionwalk.py
Last active October 1, 2024 09:56
hacky stablediffusion code for generating videos
"""
stable diffusion dreaming
creates hypnotic moving videos by smoothly walking randomly through the sample space
example way to run this script:
$ python stablediffusionwalk.py --prompt "blueberry spaghetti" --name blueberry
to stitch together the images, e.g.:
$ ffmpeg -r 10 -f image2 -s 512x512 -i blueberry/frame%06d.jpg -vcodec libx264 -crf 10 -pix_fmt yuv420p blueberry.mp4
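The docstring above is all the preview shows. The walk itself is done by spherically interpolating (slerp) between Gaussian noise latents and rendering one frame per interpolation step; a common slerp implementation (a sketch, not copied from the gist) looks like this:

import numpy as np
import torch

def slerp(t, v0, v1, dot_threshold=0.9995):
    """Spherically interpolate between two noise tensors v0 and v1 at fraction t."""
    v0_np = v0.detach().cpu().float().numpy()
    v1_np = v1.detach().cpu().float().numpy()
    dot = np.sum(v0_np * v1_np) / (np.linalg.norm(v0_np) * np.linalg.norm(v1_np))
    if np.abs(dot) > dot_threshold:
        # nearly parallel vectors: plain lerp is numerically safer
        result = (1 - t) * v0_np + t * v1_np
    else:
        theta = np.arccos(dot)
        result = (np.sin((1 - t) * theta) * v0_np + np.sin(t * theta) * v1_np) / np.sin(theta)
    return torch.from_numpy(result).to(device=v0.device, dtype=v0.dtype)

# usage sketch: frame_latent = slerp(i / num_frames, noise_a, noise_b) for each frame i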
@Norod
Norod / vae-test.py
Last active September 28, 2022 08:51
Encapsulate an encoded VAE latent as PNG image, then load it and use it to decode the original image
#!pip install diffusers==0.2.4
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np
from torchvision import transforms as tfms
torch_device = None
vae = None
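The preview stops after the globals. A rough sketch of the round trip the title describes: encode to VAE latents, pack the four latent channels into an RGBA PNG, then reload and decode. Written against a recent diffusers release (the attribute names changed after the pinned 0.2.4); the quantization range is an illustrative assumption, not the gist's exact scheme.

def init():
    global torch_device, vae
    torch_device = "cuda" if torch.cuda.is_available() else "cpu"
    vae = AutoencoderKL.from_pretrained(
        "CompVis/stable-diffusion-v1-4", subfolder="vae").to(torch_device)

def encode_to_png(image_path, latent_png_path):
    img = Image.open(image_path).convert("RGB").resize((512, 512))
    x = tfms.ToTensor()(img).unsqueeze(0).to(torch_device) * 2 - 1
    with torch.no_grad():
        latents = vae.encode(x).latent_dist.sample()  # [1, 4, 64, 64]
    # quantize the 4 latent channels into an RGBA PNG; the [-5, 5] clamp is an assumption
    q = ((latents.clamp(-5, 5) + 5) / 10 * 255).round().byte()[0]
    q = q.permute(1, 2, 0).contiguous().cpu().numpy()
    Image.fromarray(q, mode="RGBA").save(latent_png_path)

def decode_from_png(latent_png_path):
    q = np.array(Image.open(latent_png_path), dtype=np.float32)
    latents = torch.from_numpy(q).permute(2, 0, 1)[None] / 255 * 10 - 5
    with torch.no_grad():
        img = vae.decode(latents.to(torch_device)).sample  # [1, 3, 512, 512]
    out = ((img / 2 + 0.5).clamp(0, 1) * 255).round().byte()[0]
    return Image.fromarray(out.permute(1, 2, 0).contiguous().cpu().numpy())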
@torridgristle
torridgristle / prompt_mass_encoding_randomization.py
Last active October 1, 2022 14:21
Generate every combination of prompt parts, encode all of the prompts in batches to avoid running out of memory. Alternatively only keep the min/max channel values and min/max token norms and randomly generate prompts with randn noise. Intended for Stable Diffusion but can be used for anything with CLIP by just swapping out the model.get_learned…
import itertools
def prompt_combinations(prompt_parts):
    '''
    Provide a list of lists of prompt parts, like:
    [ ["A ","An "], ["anteater","feather duster"] ]
    '''
    opt_prompt = list(itertools.product(*prompt_parts, repeat=1))
    opt_prompt = [''.join(opt_prompt[b]) for b in range(len(opt_prompt))]
    return opt_prompt
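prompt_combinations is shown in full, but the batched-encoding half of the description is not. A minimal sketch of encoding a long prompt list in chunks, where encode_fn stands in for whatever text encoder is used (for Stable Diffusion, the truncated model.get_learned… call mentioned above); the function name and batch size are placeholders:

import torch

def encode_prompts_in_batches(prompts, encode_fn, batch_size=64):
    # encode chunk by chunk and move each result to the CPU, so the full
    # set of embeddings never has to sit on the GPU at once
    chunks = []
    with torch.no_grad():
        for i in range(0, len(prompts), batch_size):
            chunks.append(encode_fn(prompts[i:i + batch_size]).cpu())
    return torch.cat(chunks, dim=0)

# usage sketch:
# prompts = prompt_combinations([["A ", "An "], ["anteater", "feather duster"]])
# embeddings = encode_prompts_in_batches(prompts, encode_fn)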
# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
# *Only* converts the UNet, VAE, and Text Encoder.
# Does not convert the optimizer state or anything else.
# Written by jachiam
import argparse
import os.path as osp
import torch
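The preview ends at the imports. The bulk of the real script is a long key-renaming table for each of the three sub-models; as orientation only, here is a skeleton of its overall shape. The convert_* helpers are hypothetical placeholders for that renaming logic, the .bin file names assume the standard Diffusers pipeline layout, and the "model.diffusion_model.", "first_stage_model." and "cond_stage_model.transformer." prefixes are the ones Stable Diffusion checkpoints expect.

def convert_unet_keys(sd):
    # placeholder: the real script renames every UNet key before prefixing
    return {"model.diffusion_model." + k: v for k, v in sd.items()}

def convert_vae_keys(sd):
    # placeholder: the real script renames every VAE key before prefixing
    return {"first_stage_model." + k: v for k, v in sd.items()}

def convert_text_encoder_keys(sd):
    # placeholder: the text encoder keys need the least renaming
    return {"cond_stage_model.transformer." + k: v for k, v in sd.items()}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, required=True, help="diffusers pipeline directory")
    parser.add_argument("--checkpoint_path", type=str, required=True, help="output checkpoint path")
    args = parser.parse_args()

    unet_sd = torch.load(osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin"), map_location="cpu")
    vae_sd = torch.load(osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin"), map_location="cpu")
    text_sd = torch.load(osp.join(args.model_path, "text_encoder", "pytorch_model.bin"), map_location="cpu")

    state_dict = {}
    state_dict.update(convert_unet_keys(unet_sd))
    state_dict.update(convert_vae_keys(vae_sd))
    state_dict.update(convert_text_encoder_keys(text_sd))
    torch.save({"state_dict": state_dict}, args.checkpoint_path)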
@nousr
nousr / example_script.sh
Last active July 29, 2023 23:16
How to compute clip embeddings easily with clip-retrieval & slurm
#!/bin/bash
clip-retrieval inference \
    --input_dataset="<parent folder containing images>" \
    --output_folder="<output s3 bucket or local folder>" \
    --input_format="files" \
    --enable_metadata=False \
    --write_batch_size=500 \
    --num_prepro_workers=2 \
    --batch_size=64 \
    --enable_wandb=True \
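The embeddings end up on disk as numpy shards with a parquet metadata file per shard. A small sketch of reading them back, assuming clip-retrieval's default output layout and a local output folder:

import numpy as np
import pandas as pd

# assumed default layout of the output folder:
#   img_emb/img_emb_0.npy        -- one embedding per image
#   metadata/metadata_0.parquet  -- matching image paths / metadata
embeddings = np.load("output/img_emb/img_emb_0.npy")
metadata = pd.read_parquet("output/metadata/metadata_0.parquet")
print(embeddings.shape, len(metadata))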
@Birch-san
Birch-san / scaled_softmax.py
Created April 3, 2023 00:16
Questionable softmax
from torch import FloatTensor
vae_scale_factor = 8
typical_self_attn_key_length = (512 / vae_scale_factor) * (512 / vae_scale_factor)
desired_self_attn_key_length = (200 / vae_scale_factor) * (200 / vae_scale_factor)
key_length_factor = desired_self_attn_key_length / typical_self_attn_key_length if is_self_attn else 1.

def softmax(x: FloatTensor, dim=-1) -> FloatTensor:
    key_tokens = x.size(-1)
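The function is cut off right after reading the key count. One plausible way to finish the idea (an assumption about the intent, not the gist's actual formula): temper the logits by the ratio of log key counts, so attention over a longer-than-typical key sequence does not get flattened.

import math

def scaled_softmax(x: FloatTensor, dim=-1, trained_key_length=typical_self_attn_key_length) -> FloatTensor:
    key_tokens = x.size(-1)
    scale = math.log(trained_key_length) / math.log(key_tokens)
    return (x * scale).softmax(dim=dim)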
@Birch-san
Birch-san / arb.py
Created July 27, 2023 23:07
Computing aspect ratio buckets
import numpy as np
import math
from numpy.typing import NDArray
# we are trying to make buckets of varying aspect ratios,
# all with about the same area (equivalent to a 512x512 square)
square_side = 512
buckets = 8
widest_aspect: float = math.atan2(1, 2) # 1/2 = 0.5 aspect ratio
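The preview ends where the widest aspect is defined. A sketch of one way the bucket list can be completed; the tallest bound, the linspace sweep, and the snapping of each side to a multiple of 64 are assumptions, not necessarily the gist's choices.

tallest_aspect: float = math.atan2(2, 1)  # 2/1 = 2.0 aspect ratio

angles: NDArray = np.linspace(widest_aspect, tallest_aspect, buckets)
area = square_side ** 2
sizes = []
for aspect in np.tan(angles):  # aspect = height / width
    width = 64 * round(math.sqrt(area / aspect) / 64)
    height = 64 * round(math.sqrt(area * aspect) / 64)
    sizes.append((width, height))
print(sizes)  # roughly (704, 384) ... (512, 512) ... (384, 704)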