Birch-san

@Birch-san
Birch-san / naiv1_generate.py
Last active June 15, 2025 22:52
Script for generating images from NAIv1
from dataclasses import dataclass
from einops import rearrange
import re
import torch
from torch import BoolTensor, FloatTensor, IntTensor, LongTensor, inference_mode
from torch.nn.functional import pad
from itertools import islice
from typing import Generator, Iterable, Iterator, Optional, Protocol, TypeVar
from typing_extensions import override
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel, UNet2DConditionOutput
from __future__ import annotations
from argparse import ArgumentParser, Namespace
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Optional
import torch
from torch import Tensor, no_grad, enable_grad
import torch.autograd.forward_ad as fwAD
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.nn.functional import scaled_dot_product_attention
@Birch-san
Birch-san / jvp_flops.py
Created June 14, 2025 00:12
Does linearize work? Am I using it right?
from __future__ import annotations
from argparse import ArgumentParser, Namespace
from dataclasses import dataclass
from functools import partial
from typing import Callable, Generic, TypeVar
import torch
from torch import enable_grad, no_grad
import torch.autograd.forward_ad as fwAD
from torch.func import linearize
from torch.nn.attention import SDPBackend, sdpa_kernel
from __future__ import annotations
from argparse import ArgumentParser, Namespace
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
from torch import enable_grad, no_grad
import torch.autograd.forward_ad as fwAD
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.nn.functional import scaled_dot_product_attention
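For reference on the question in jvp_flops.py's description, a minimal sketch of the documented torch.func.linearize usage (illustrative function and shapes, not the gist's benchmark): linearize evaluates the function once at the primal point and returns a callable that computes JVPs at that point, so repeated tangents avoid re-running the forward pass.

import torch
from torch.func import jvp, linearize

def f(x: torch.Tensor) -> torch.Tensor:
    return x.sin().sum(-1)

x = torch.randn(8, 16)

# linearize runs f once at the primal point and returns (output, jvp_fn);
# jvp_fn then computes JVPs at that point without re-evaluating f each time
out, jvp_fn = linearize(f, x)

tangent = torch.randn_like(x)
# should match the one-shot torch.func.jvp result
_, expected = jvp(f, (x,), (tangent,))
print(torch.allclose(jvp_fn(tangent), expected))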
@Birch-san
Birch-san / gist:eddad13648725d47c71799c39e8361b2
Created May 29, 2025 13:07
Example API request for generating an image using a stored NAIv4 vibe. Uses vibe files created by https://gist.github.com/Birch-san/5eb62a4a5e4a1c4447a55e3a9faf8988
#!/usr/bin/env bash
set -eo pipefail
# https://stackoverflow.com/a/12194427/5257399
create() { # fd base [qualifier [suffix [max]]]
  local fd="$1" base="$2" qualifier="${3-}" suffix="${4-.png}" max="${5-}"
  local n=0 file
  local - # ash-style local scoping of options in 4.4+
  set -o noclobber
  REPLY=
@Birch-san
Birch-san / vibev4_encode.sh
Last active May 29, 2025 13:04
Example API request for encoding a NAIv4 vibe. Doesn't include all the metadata (e.g. image thumbnail and which model it was encoded for) that the UI adds
#!/usr/bin/env bash
set -eo pipefail
# https://stackoverflow.com/a/12194427/5257399
create() { # fd base [qualifier [suffix [max]]]
  local fd="$1" base="$2" qualifier="${3-}" suffix="${4-.png}" max="${5-}"
  local n=0 file
  local - # ash-style local scoping of options in 4.4+
  set -o noclobber
  REPLY=
@Birch-san
Birch-san / attn_jvp_test.py
Created May 27, 2025 01:34
Test stub for comparing jvp of memory-efficient attention against reference implementation
from abc import ABC, abstractmethod
from typing import NamedTuple, Optional
from typing_extensions import override
import torch
from torch import Tensor, no_grad, enable_grad
import torch.autograd.forward_ad as fwAD
from torch.autograd.function import FunctionCtx
from torch.nn import Linear, Module
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.nn.functional import scaled_dot_product_attention
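A minimal sketch of the comparison the stub's title describes: compute a forward-mode JVP of scaled_dot_product_attention under the math backend and under the memory-efficient backend, then check they agree. The shapes and tolerance here are placeholders, and whether the memory-efficient backend implements forward-mode AD at all depends on the PyTorch build, hence the guard.

import torch
import torch.autograd.forward_ad as fwAD
from torch.nn.attention import SDPBackend, sdpa_kernel
from torch.nn.functional import scaled_dot_product_attention

q, k, v = (torch.randn(1, 8, 128, 64) for _ in range(3))
tq, tk, tv = (torch.randn(1, 8, 128, 64) for _ in range(3))

def sdpa_jvp(backend: SDPBackend) -> torch.Tensor:
    # pack primal+tangent into dual tensors, run SDPA under the chosen
    # backend, and read the tangent (the JVP) off the dual output
    with sdpa_kernel(backend), fwAD.dual_level():
        out = scaled_dot_product_attention(
            fwAD.make_dual(q, tq),
            fwAD.make_dual(k, tk),
            fwAD.make_dual(v, tv),
        )
        return fwAD.unpack_dual(out).tangent

reference = sdpa_jvp(SDPBackend.MATH)
try:
    efficient = sdpa_jvp(SDPBackend.EFFICIENT_ATTENTION)
    print(torch.allclose(reference, efficient, atol=1e-4))
except RuntimeError as e:
    # forward-mode AD support varies by backend and PyTorch version
    print(f"efficient backend unavailable for forward AD: {e}")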
@Birch-san
Birch-san / matmul_via_vmap.py
Last active April 28, 2025 01:22
How to implement mm, bmm and matmul in pytorch via vmap
import torch
from torch import FloatTensor
def mm(a: FloatTensor, b: FloatTensor) -> FloatTensor:
    assert a.ndim == 2
    assert b.ndim == 2
    # inner dimensions must match: (m, k) @ (k, n)
    assert a.size(-1) == b.size(-2)
    # batched dot product
    def bdp(a_row: FloatTensor, b: FloatTensor) -> FloatTensor:
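The preview cuts off before the vmap calls themselves, so here is a hedged sketch of the general technique the gist's title names (names and shapes are illustrative, not necessarily the gist's own): express mm as a dot product vmapped over the columns of b and then over the rows of a, and get bmm by vmapping mm over the shared batch dim.

import torch
from torch import FloatTensor

def mm(a: FloatTensor, b: FloatTensor) -> FloatTensor:
    # dot one row of `a` against every column of `b`: (k,), (k, n) -> (n,)
    row_times_b = torch.vmap(torch.dot, in_dims=(None, 1))
    # map that over the rows of `a`: (m, k), (k, n) -> (m, n)
    return torch.vmap(row_times_b, in_dims=(0, None))(a, b)

def bmm(a: FloatTensor, b: FloatTensor) -> FloatTensor:
    # unbind the shared batch dim of both operands: (b, m, k), (b, k, n) -> (b, m, n)
    return torch.vmap(mm)(a, b)

a, b = torch.randn(4, 5, 3), torch.randn(4, 3, 6)
print(torch.allclose(bmm(a, b), torch.bmm(a, b), atol=1e-5))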
@Birch-san
Birch-san / vmap_repro.py
Created April 17, 2025 13:00
why can't I invoke vmapped attention with a mask? why doesn't vmap unbind my mask's batch dim?
from typing import Optional
import torch
from torch import FloatTensor, BoolTensor, Tensor, inference_mode
from torch.func import functional_call, stack_module_state
from torch.nn import Module, Linear
from torch.nn.functional import scaled_dot_product_attention
from einops import rearrange
class Attention(Module):
    def __init__(
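The question above is about whether vmap will unbind a mask's batch dim the same way it unbinds q/k/v. Stripped of the gist's ensembled Attention modules, the intended call looks roughly like the sketch below (shapes are illustrative); the gist reports this kind of call failing on the PyTorch version it targets, so treat this as the attempted pattern rather than a confirmed recipe.

import torch
from torch import BoolTensor, FloatTensor
from torch.nn.functional import scaled_dot_product_attention

def attn(q: FloatTensor, k: FloatTensor, v: FloatTensor, mask: BoolTensor) -> FloatTensor:
    return scaled_dot_product_attention(q, k, v, attn_mask=mask)

# illustrative shapes: 4 ensemble members, 8 heads, 128 tokens, head dim 64
q, k, v = (torch.randn(4, 8, 128, 64) for _ in range(3))
mask = torch.rand(4, 1, 128, 128) > 0.5  # per-member boolean mask

# an explicit in_dims entry for every argument asks vmap to unbind the
# mask's leading dim alongside q/k/v instead of broadcasting it whole
out = torch.vmap(attn, in_dims=(0, 0, 0, 0))(q, k, v, mask)
# expected shape if the call succeeds: (4, 8, 128, 64)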
@Birch-san
Birch-san / danbooru-tag-to-prompt-bookmarklet.js
Created March 3, 2025 19:39
Danbooru tag to prompt bookmarklet
javascript: (async function copyTags() {
  const replacements = {
    v: "peace sign",
    "double v": "double peace",
    "|_|": "bar eyes",
    "\\||/": "open \\m/",
    ":|": "neutral face",
    ";|": "neutral face",
    "eyepatch bikini": "square bikini",
    "tachi-e": "character image",