{
  "seed": 1,
  "decoder": {
    "unets": [
      {
        "dim": 416,
        "cond_dim": 512,
        "image_embed_dim": 768,
        "text_embed_dim": 768,
        "channels": 3
      }
    ]
  }
}
import os
import numpy as np
import json
import click
from clip import tokenize
from dalle2_pytorch.trainer import DiffusionPriorTrainer
from dalle2_pytorch import DiffusionPrior, DiffusionPriorNetwork, OpenAIClipAdapter

def get_prior(path, device):
    # Rebuild the diffusion prior and load its weights from the checkpoint at `path`.
    # Sketch only: the hyperparameters below are placeholders and must match the trained configuration.
    net = DiffusionPriorNetwork(dim = 768, depth = 12, dim_head = 64, heads = 12)
    prior = DiffusionPrior(net = net, clip = OpenAIClipAdapter('ViT-L/14'), image_embed_dim = 768, timesteps = 1000, cond_drop_prob = 0.1).to(device)
    trainer = DiffusionPriorTrainer(diffusion_prior = prior, lr = 1.1e-4)
    trainer.load(path)
    return trainer
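Once loaded, the prior can turn tokenized text into CLIP image embeddings for the decoder. A minimal usage sketch follows; the checkpoint path and prompt are hypothetical, and it assumes DiffusionPriorTrainer forwards sample() to the underlying prior.

prior_trainer = get_prior('./prior_checkpoint.pth', 'cuda:0')  # hypothetical checkpoint path
tokens = tokenize(['a corgi wearing a top hat']).to('cuda:0')
image_embed = prior_trainer.sample(tokens)  # (1, 768) CLIP image embedding to condition the decoder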