Redirects HF paper pages to arXiv.
Chrome: https://chrome.google.com/webstore/detail/redirector/ocgpenflpmgnfapjedencafcfakcekcd
Firefox: https://addons.mozilla.org/en-US/firefox/addon/redirector/
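A minimal sketch of the kind of rule this sets up (the exact pattern below is an assumption, not copied from the gist's exported settings): in Redirector, add a rule with pattern type "Regular Expression", include pattern https://huggingface.co/papers/(\d+\.\d+), and redirect target https://arxiv.org/abs/$1, so a paper page such as https://huggingface.co/papers/2305.14314 opens as https://arxiv.org/abs/2305.14314.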
from dataclasses import dataclass

@dataclass
class Args:
    vocab_size: int = 129280
    dim: int = 7168
    inter_dim: int = 18432
    moe_inter_dim: int = 2048
    n_layers: int = 61
# https://x.com/shxf0072/status/1873038335427658011
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from collections import OrderedDict
from ohara.modules.norm import RMSNorm
import torch.nn as nn
import copy
import torch
from torch.nn.attention.flex_attention import flex_attention, create_block_mask, or_masks, create_mask
from triton.testing import do_bench
from functools import partial

torch.set_default_device('cuda')
B = 4
import torch
import os
import json
from safetensors.torch import load_file, save_file

def replicate_lora_a(name: str, weight: "torch.Tensor") -> dict[str, "torch.Tensor"]:
    prefix, suffix = name.split('qkv_proj')
    res = {}
    for t in ['q_proj', 'k_proj', 'v_proj']:
        name = f"{prefix}{t}{suffix}"
        res[name] = weight  # LoRA A acts on the input, so the same weight applies to each split projection
    return res
import torch
torch.set_default_device('cuda')
from triton.testing import do_bench
from collections import defaultdict
from functools import partial
import random
random.seed(0)

def get_flops(A, B):
    ms = do_bench(lambda: torch.mm(A, B))
ncu --list-sets # The configuration for sets. A set defines a set of sections.
ncu --list-sections # The configuration for sections. A section defines a set of metrics.
ncu --query-metrics # All individual metrics.
ncu --query-metrics-mode suffix --metrics <metrics list> # Check various suffixes for a base metric name.
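A typical invocation that puts these together (the target script and report name here are placeholders, not from the original):
ncu --set full -o my_report python train.py   # collect the 'full' section set and write my_report.ncu-rep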
Yoav Goldberg, April 2023.
With the release of the ChatGPT model and follow-up large language models (LLMs), there was a lot of discussion of the importance of "RLHF training", that is, "reinforcement learning from human feedback". I was puzzled for a while as to why RL (Reinforcement Learning) is better than learning from demonstrations (a.k.a. supervised learning) for training language models. Shouldn't learning from demonstrations (or, in language-model terminology, "instruction fine-tuning", learning to imitate human-written answers) be sufficient? I came up with a theoretical argument that was somewhat convincing. But I came to realize there is an additional argument which not only supports the case for RL training, but also requires it, in particular for models like ChatGPT. This additional argument is spelled out in (the first half of) a talk by John Schulman from OpenAI. This post pretty much
import torch
import torch._inductor.config
import time

torch._inductor.config.triton.cudagraphs = False
torch.set_float32_matmul_precision('high')

def bench(f, name=None, iters=100, warmup=5, display=True, profile=False):
    for _ in range(warmup):
        f()
#!/usr/bin/env python
import argparse
import torch
from transformers import GPTJForCausalLM, GPTJConfig
# Note: these need the git version of Transformers as of 7/22/2022
from transformers import CodeGenTokenizer, CodeGenForCausalLM
from transformers import CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST

parser = argparse.ArgumentParser('Convert SalesForce CodeGen model to GPT-J')