import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Load the tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('./results')
# Set the device to GPU if available, otherwise use CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
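# A minimal usage sketch: generate a short completion with the fine-tuned model
# loaded above. The prompt and the decoding settings are illustrative assumptions,
# not values taken from the original script.
model.eval()
prompt = "Once upon a time"
inputs = tokenizer(prompt, return_tensors='pt').to(device)
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=50,
        do_sample=True,
        top_p=0.95,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token by default
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))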
MichelNivard / openllama-3b-bnb-4bit-training.ipynb (OpenLlama 3b bnb-4bit training notebook, created June 12, 2023; notebook not rendered here)
require(MASS)
# fixed a2 and e2 for the entire script:
a <- .87 # additive genetic variance
e <- .13 # environmental variance
# make MZ covariance, i.e. the cov is a, the var = 1
sigma_mz <- matrix(c(1,a,a,1),2,2)
## The problem with (personal) non-ordinality:
# sample size (big because the traits are rare)
n <- 30000 # 30k fictional people
# a pair of exposures, no measurement issues:
xa <- rnorm(n)
xb <- rnorm(n)
# Personal thresholds 1-4 for each person.
# A reasonable scale design, by which I mean the bins fill up roughly "normal"; this matters a lot!
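# A hedged sketch of how such personal thresholds could be applied (the original
# continuation is not shown; the population cut points and their jitter are
# assumptions): each person gets their own cut points, jittered around population
# cut points, and the continuous exposures are binned into 4 ordered categories.
pop_cuts <- qnorm(c(.25, .50, .75))                    # population-level cut points
personal_cuts <- t(sapply(seq_len(n), function(i) sort(pop_cuts + rnorm(3, 0, .3))))
bin_personal <- function(x, cuts) {
  vapply(seq_along(x), function(i) sum(x[i] > cuts[i, ]) + 1L, integer(1))
}
oa <- bin_personal(xa, personal_cuts)                  # ordinal 1-4 version of xa
ob <- bin_personal(xb, personal_cuts)                  # ordinal 1-4 version of xb
table(oa)                                              # bins fill up roughly "normal"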
# t-test vs wilcox vs ordinal
library(tidyverse)
library(multidplyr) # parallelize
library(rms) # ordinal regression
sample_size <- 500 # N
# generate paired sets and calculate p-values with different techniques
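# A hedged sketch of one simulation replicate (the original code is not shown;
# this sketch uses two independent groups rather than the "paired sets" of the
# comment above, and the effect size, cut points, and use of rms::orm are
# assumptions): simulate a latent normal outcome, coarsen it to a 4-point ordinal
# scale, and collect p-values from a t-test, a Wilcoxon test, and an ordinal regression.
one_rep <- function(n = sample_size, delta = 0.2) {
  g <- rep(0:1, each = n)                               # group indicator
  latent <- rnorm(2 * n, mean = delta * g)              # latent continuous outcome
  y <- cut(latent, c(-Inf, -0.7, 0, 0.7, Inf), labels = FALSE)  # coarsen to 4 bins
  fit <- orm(y ~ g)                                     # proportional-odds model (rms)
  c(t_test  = t.test(y ~ g)$p.value,
    wilcox  = wilcox.test(y ~ g)$p.value,
    ordinal = anova(fit)["g", "P"])                     # p-value for the group term
}
set.seed(1)
replicate(10, one_rep())                                # small demo run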
# Full training script for protein contact map diffusion model
# Using lucidrains' denoising-diffusion-pytorch (grayscale input)
from denoising_diffusion_pytorch import Unet, GaussianDiffusion, Trainer
import matplotlib.pyplot as plt
import numpy as np
model = Unet(
    dim = 64,                 # placeholder width; the original value is not shown
    dim_mults = (1, 2, 4, 8), # placeholder multipliers
    channels = 1              # single-channel (grayscale) contact maps
)
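# Hedged sketch of the rest of the training setup, following the
# denoising-diffusion-pytorch README; the image size, the folder of contact-map
# images, and the trainer hyperparameters below are assumptions.
diffusion = GaussianDiffusion(
    model,
    image_size = 128,         # contact maps rendered as 128x128 grayscale images
    timesteps = 1000,
)

trainer = Trainer(
    diffusion,
    'contact_maps/',          # hypothetical folder of grayscale contact-map images
    train_batch_size = 16,
    train_lr = 8e-5,
    train_num_steps = 100000,
    gradient_accumulate_every = 2,
    ema_decay = 0.995,
    amp = True,               # mixed precision
)
trainer.train()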
import os
import numpy as np
import sidechainnet as scn
from denoising_diffusion_pytorch import Unet, GaussianDiffusion
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as T
from PIL import Image
from pathlib import Path
from tqdm import tqdm
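# Hedged sketch of how these imports are plausibly tied together (the original
# code is not shown): pull C-alpha coordinates from sidechainnet, render each
# protein as a grayscale distance-map image, and wrap the saved images in a
# simple Dataset. The scn.load() arguments, the 14-atoms-per-residue layout with
# C-alpha at index 1, the distance cap, and the output folder are assumptions.

def coords_to_distance_map(crd, max_dist=25.0):
    """Flat (L*14, 3) coordinates -> C-alpha distance map scaled into [0, 1]."""
    crd = np.nan_to_num(np.asarray(crd))              # guard against missing atoms
    ca = crd.reshape(-1, 14, 3)[:, 1, :]              # assumed C-alpha position
    diff = ca[:, None, :] - ca[None, :, :]
    dist = np.sqrt((diff ** 2).sum(-1))
    return np.clip(dist / max_dist, 0.0, 1.0)

out_dir = Path('contact_maps')                        # hypothetical output folder
out_dir.mkdir(exist_ok=True)

data = scn.load(casp_version=12, thinning=30)         # assumed loader arguments
for i, crd in enumerate(tqdm(data['train']['crd'])):
    dmap = coords_to_distance_map(crd)
    img = Image.fromarray((dmap * 255).astype(np.uint8), mode='L')
    img.resize((128, 128)).save(out_dir / f'{i:05d}.png')

class ContactMapDataset(Dataset):
    """Grayscale contact-map images as 1 x 128 x 128 tensors in [0, 1]."""
    def __init__(self, folder):
        self.paths = sorted(Path(folder).glob('*.png'))
        self.transform = T.ToTensor()
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, idx):
        return self.transform(Image.open(self.paths[idx]).convert('L'))

loader = DataLoader(ContactMapDataset(out_dir), batch_size=16, shuffle=True)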