More details and background: Generating (almost) reproducible pictures using Diffusers with xFormers
Unfortunately, Flash Attention won't accept SD1.x attention shapes (the UNet's largest head dimension, 160, is over the kernel's 128 limit), but will accept SD2.x and its variants, which use a head dimension of 64 throughout.
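A quick way to see that constraint is to pin the Flash backend and probe it with both head sizes. This is a sketch; MemoryEfficientAttentionFlashAttentionOp is the name exported by xFormers releases around 0.0.16 and is an assumption here.

import torch
import xformers.ops

def flash_accepts(head_dim):
    q = torch.rand(1, 1024, 8, head_dim, device="cuda", dtype=torch.float16)
    try:
        xformers.ops.memory_efficient_attention(
            q, q, q, op=xformers.ops.MemoryEfficientAttentionFlashAttentionOp)
        return True
    except (ValueError, NotImplementedError):
        return False

print(flash_accepts(64))   # SD2.x-style head dim: expected True
print(flash_accepts(160))  # SD1.x's largest head dim: expected False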
#import "YAJL.h" // YAJL ObjC | |
#import "NSString+SBJSON.h" // JSON Framework | |
// JSON Data: Twitter Home Timeline (count=200) | |
// iPod touch 3rd Gen -> test0:296ms test1:323ms test2:170ms test3:488ms | |
@implementation Test | |
- (void)test0:(NSData*)JSONData { | |
double st = CFAbsoluteTimeGetCurrent(); |
// Log str prefixed with `indent` levels of indentation
static void indentedMsg(int indent, NSString *str) {
    NSMutableString *idt = [NSMutableString string];
    for (int i = 0; i < indent; i++) {
        [idt appendString:@" "];
    }
    NSLog(@"%@%@", idt, str);
}
// YAJLParserDelegate callback; `indent` is an ivar tracking the nesting depth
- (void)parserDidStartDictionary:(YAJLParser *)parser {
    indentedMsg(indent, @"dict>");
    indent++;
}
/*
 PerfCheck: Stopwatch for Debugging (ObjC)
 Usage:
   PFBEGIN(@"hoge");
   // some process
   PFEND(@"hoge");   // logs the elapsed time for "hoge"
   PFBEGIN(@"fuga");
   // some process
   PFEND(@"fuga");
*/
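The same begin/end stopwatch pattern as a rough Python sketch, for reference (the pf_begin/pf_end names are hypothetical, not part of PerfCheck):

import time

_pf_start = {}

def pf_begin(key):  # hypothetical analogue of PFBEGIN
    _pf_start[key] = time.perf_counter()

def pf_end(key):    # hypothetical analogue of PFEND; logs elapsed ms
    print(f"{key}: {(time.perf_counter() - _pf_start.pop(key)) * 1000.0:.0f}ms")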
#import "flalib.h" | |
@interface TestSprite : FLSprite { | |
} | |
@end | |
@implementation TestSprite |
# SELF-ATTENTION DOES NOT NEED O(n^2) MEMORY: https://arxiv.org/pdf/2112.05682.pdf
# https://github.com/google-research/google-research/blob/master/memory_efficient_attention/memory_efficient_attention.ipynb
# you may need to set 'export XLA_PYTHON_CLIENT_PREALLOCATE=false'
# https://github.com/google/jax/issues/7118#issuecomment-950183972
import functools
import jax
import jax.numpy as jnp
import math
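The core idea, as a minimal query-chunking sketch (single head, no masking; the linked notebook additionally chunks keys/values and uses a numerically stable running softmax, which this sketch deliberately omits):

def chunked_attention(q, k, v, chunk=256):
    # q, k, v: [n, d]; only [chunk, n] logits are materialized at a time,
    # so peak memory is O(n * chunk) instead of O(n^2)
    scale = 1.0 / math.sqrt(q.shape[-1])
    def one_chunk(q_chunk):
        logits = (q_chunk * scale) @ k.T            # [chunk, n]
        return jax.nn.softmax(logits, axis=-1) @ v  # [chunk, d]
    q_chunks = q.reshape(-1, chunk, q.shape[-1])    # assumes chunk divides n
    return jax.lax.map(one_chunk, q_chunks).reshape(-1, q.shape[-1])

# agreement check against the naive O(n^2) version
x = jax.random.normal(jax.random.PRNGKey(0), (1024, 64))
naive = jax.nn.softmax((x / math.sqrt(64)) @ x.T, axis=-1) @ x
assert jnp.allclose(chunked_attention(x, x, x), naive, atol=1e-4)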
import xformers
import xformers.ops
import torch

device = 'cuda'
dtype = torch.float16
shape = (1, 1024, 16, 16)  # (batch, seq_len, heads, head_dim)
torch.manual_seed(0)
q, k, v = (torch.rand(shape, device=device, dtype=dtype) for _ in range(3))
out = xformers.ops.memory_efficient_attention(q, k, v)
print(out.shape)  # torch.Size([1, 1024, 16, 16])
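A quick sanity check that the xFormers result matches PyTorch's reference attention (assumes PyTorch 2.x for scaled_dot_product_attention; xFormers takes [batch, seq, heads, head_dim] while SDPA takes [batch, heads, seq, head_dim], hence the transposes):

import torch.nn.functional as F
ref = F.scaled_dot_product_attention(
    q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
).transpose(1, 2)
print(torch.allclose(out, ref, atol=1e-2))  # True, up to fp16 noise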
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np

plt.rcParams["figure.figsize"] = (10,5)
plt.rcParams['figure.facecolor'] = 'white'

def generate_tuxedo_cat_picture(fn_prefix, seed=0):
    # checkpoint id is an assumption; any SD checkpoint should work here
    pipe = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16).to("cuda")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    g = torch.Generator("cuda").manual_seed(seed)
    pipe("a tuxedo cat", generator=g).images[0].save(f"{fn_prefix}_{seed}.png")
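Hypothetical usage, putting the two rcParams to work for a side-by-side look at two seeds (the function re-creates the pipeline on every call, which is fine for a quick check):

fig, axes = plt.subplots(1, 2)
for ax, seed in zip(axes, (0, 1)):
    generate_tuxedo_cat_picture("cat", seed=seed)
    ax.imshow(np.asarray(Image.open(f"cat_{seed}.png")))
    ax.set_title(f"seed={seed}")
    ax.axis("off")
plt.show()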
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import xformers
import xformers.ops

plt.rcParams["figure.figsize"] = (10,5)
plt.rcParams['figure.facecolor'] = 'white'
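The step this variant adds is enabling xFormers attention on the pipeline; a sketch under the same assumed checkpoint as above. With a fixed Generator seed this is what makes runs (almost) reproducible:

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",  # placeholder checkpoint
    torch_dtype=torch.float16).to("cuda")
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
image = pipe("a tuxedo cat",
             generator=torch.Generator("cuda").manual_seed(0)).images[0]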
# from https://github.com/lllyasviel/ControlNet/blob/main/gradio_canny2image.py
from share import *
import config
import cv2
import einops
import gradio as gr
import numpy as np
import torch
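The preprocessing at the heart of canny2image, sketched rather than quoted verbatim (the input path and threshold values are examples; the real script takes them from the Gradio UI):

# Canny edge map -> 3-channel float control tensor in CHW layout
img = cv2.imread("input.png")                   # placeholder input path
detected_map = cv2.Canny(img, 100, 200)         # low/high thresholds
control = np.stack([detected_map] * 3, axis=2)  # gray -> HWC with 3 channels
control = torch.from_numpy(control.astype(np.float32) / 255.0)
control = einops.rearrange(control, 'h w c -> c h w')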