https://github.com/jondurbin/airoboros
pip install --upgrade airoboros==2.0.13
# train_grpo.py
#
# See https://github.com/willccbb/verifiers for ongoing developments
#
import re
import torch
from datasets import load_dataset, Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig
from trl import GRPOConfig, GRPOTrainer
"""A 1-dimensional example of adaptive mesh refinement in JAX. In this case, a simple | |
implementation of quadrature. | |
Static shapes don't mean you can't do this. Heap allocation is *not* necessary! | |
Not extensively tested; any bugs leave a comment below. | |
""" | |
import functools as ft | |
from collections.abc import Callable |
"""Hello world, with a genetic algorithm. | |
https://twitter.com/matthen2/status/1769368467067621791 | |
""" | |
import random | |
import time | |
from dataclasses import dataclass | |
from itertools import chain | |
from typing import Iterable, List |
https://github.com/jondurbin/airoboros
pip install --upgrade airoboros==2.0.13
# Clone llama.cpp
git clone https://github.com/ggerganov/llama.cpp.git
cd llama.cpp

# Build it (LLAMA_METAL=1 enables the Apple Metal GPU backend)
make clean
LLAMA_METAL=1 make

# Download model: set the filename used by the run commands that follow
export MODEL=llama-2-13b-chat.ggmlv3.q4_0.bin
import traceback
import openai
import sys

# List the models available to this API key (network call).
# NOTE(review): openai.Model.list() is the legacy (<1.0) SDK interface;
# openai>=1.0 replaced it with OpenAI().models.list() — confirm which SDK
# version this project pins before upgrading.
models = openai.Model.list()
def baka(error, character="tsundere",): | |
exc_type, exc_value, exc_traceback = sys.exc_info() | |
traceback_list = traceback.extract_tb(exc_traceback) |
Lior Fox, January 2023
TL;DR: I present examples of apparent "symbolic" capabilities of ChatGPT, and discuss some context and possible interpretations
ChatGPT probably requires no introduction at this stage. If you haven't had the chance to play with it yet, you should do so (as long as it is free?). Before I dive in, it is perhaps better to clearly state what this post isn't about. I will not discuss:
Audience: I assume you heard of chatGPT, maybe played with it a little, and were impressed by it (or tried very hard not to be). And that you also heard that it is "a large language model". And maybe that it "solved natural language understanding". Here is a short personal perspective of my thoughts on this (and similar) models, and where we stand with respect to language understanding.
Around 2014-2017, right within the rise of neural-network based methods for NLP, I was giving a semi-academic-semi-popsci lecture, revolving around the story that achieving perfect language modeling is equivalent to being as intelligent as a human. Somewhere around the same time I was also asked in an academic panel "what would you do if you were given infinite compute and no need to worry about labour costs" to which I cockily responded "I would train a really huge language model, just to show that it doesn't solve everything!". We
from typing import TypeVar, Generic, Callable
from dataclasses import dataclass
from argparse import Namespace

# Generic type parameters: T is the element type mapped from, S the type
# produced by the mapping function of the ListMap defined below.
T = TypeVar('T')
S = TypeVar('S')
@dataclass | |
class ListMap(Generic[S, T]): | |
f: Callable[[T], S] |
library(tidyverse)
library(patchwork)
library(latex2exp)

# Map an arbitrary x axis (0..100) onto evenly spaced logits in [-4, 4],
# then derive the corresponding odds and probabilities.
logit_df <- tibble(x = seq(0, 100, length.out = 101),
                   logits = seq(-4, 4, length.out = 101)) |>
  mutate(odds = exp(logits)) |>         # odds = e^logit
  mutate(probs = plogis(logits))        # probability via the logistic CDF
p1 <- ggplot(logit_df, aes(x = x, y = probs)) + |