| Model | AGIEval | GPT4All | TruthfulQA | Bigbench | Average |
|---|---|---|---|---|---|
| zephyr-7b-alpha | 38 | 72.24 | 56.06 | 40.57 | 51.72 |
| Task | Version | Metric | Value | | Stderr |
|---|---|---|---|---|---|
| agieval_aqua_rat | 0 | acc | 20.47 | ± | 2.54 |
| | | acc_norm | 19.69 | ± | 2.50 |
| agieval_logiqa_en | 0 | acc | 31.49 | ± | 1.82 |
| Model | Average | AGIEval | GPT4All | TruthfulQA | Bigbench |
|---|---|---|---|---|---|
| mlabonne/OmniTruthyBeagle-7B-v0 📄 | 57.8 | 45.72 | 77.49 | 76.16 | 50.18 |
| mlabonne/NeuralOmniBeagle-7B-v2 📄 | 57.75 | 45.86 | 77.31 | 75.34 | 50.09 |
| mlabonne/OmniBeagle-7B 📄 | 57.72 | 45.64 | 77.48 | 75.03 | 50.03 |
| mlabonne/NeuralOmniBeagle-7B 📄 | 57.71 | 45.85 | 77.26 | 76.06 | 50.03 |
| mlabonne/NeuralOmni-7B [📄](https://gist.github.com/mlabonne/4b5ecee86d0fd3714ba0cbd |
```yaml
base_model: codellama/CodeLlama-7b-hf
base_model_config: codellama/CodeLlama-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: LlamaTokenizer
is_llama_derived_model: true
hub_model_id: EvolCodeLlama-7b

load_in_8bit: false
load_in_4bit: true
strict: false
```
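If axolotl completes the run and pushes the result under `hub_model_id`, the fine-tuned model can be loaded back with plain transformers, assuming the QLoRA adapter has been merged into the base weights (as in the merge script below). The repository id here is a hypothetical placeholder, not a real upload.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id built from hub_model_id above; replace with the real destination.
model_id = "your-username/EvolCodeLlama-7b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```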
```python
# Example usage:
# python merge_peft.py --base_model=meta-llama/Llama-2-7b-hf --peft_model=./qlora-out --hub_id=alpaca-qlora

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

import argparse


def get_args():
```
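The excerpt cuts off at `get_args()`. Below is a minimal sketch, not the original script, of how such a merge utility typically continues: the flags mirror the usage comment above, and the merge itself relies on the standard `PeftModel.from_pretrained` and `merge_and_unload` calls from PEFT.

```python
import argparse

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer


def get_args():
    # Flags taken from the usage example above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_model", type=str, required=True)
    parser.add_argument("--peft_model", type=str, required=True)
    parser.add_argument("--hub_id", type=str, required=True)
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()

    # Load the base model, attach the QLoRA adapter, then fold it into the weights.
    base = AutoModelForCausalLM.from_pretrained(args.base_model, torch_dtype=torch.float16)
    model = PeftModel.from_pretrained(base, args.peft_model)
    model = model.merge_and_unload()

    # Push the merged model and tokenizer under the requested Hub id.
    tokenizer = AutoTokenizer.from_pretrained(args.base_model)
    model.push_to_hub(args.hub_id)
    tokenizer.push_to_hub(args.hub_id)
```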
```python
# Based on younesbelkada/finetune_llama_v2.py
# Install the following libraries:
# pip install accelerate==0.21.0 peft==0.4.0 bitsandbytes==0.40.2 transformers==4.31.0 trl==0.4.7 scipy

from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from transformers import (
```
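The import list is truncated at `from transformers import (`. For context, here is a minimal sketch, under assumed hyperparameters rather than the article's actual values, of the 4-bit quantization and LoRA setup that this style of QLoRA fine-tuning script builds on.

```python
import torch
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "meta-llama/Llama-2-7b-hf"  # assumed base model

# Load the base model in 4-bit NF4 precision.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Attach LoRA adapters to the quantized model (illustrative rank and scaling).
model = prepare_model_for_kbit_training(model)
peft_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```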
```python
import numpy as np
import matplotlib.pyplot as plt


def softmax(x, temperature=1.0):
    e_x = np.exp(x / temperature)
    return e_x / e_x.sum(axis=0)


logits = np.array([1.5, -1.8, 0.9, -3.2])
temperatures = [1.0, 0.5, 0.1]
```
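The snippet imports matplotlib but stops before using it. Continuing from the definitions above, a short sketch of how the temperature-scaled distributions over these four logits could be printed and plotted:

```python
# Compare how temperature reshapes the distribution over the four tokens.
for t in temperatures:
    probs = softmax(logits, temperature=t)
    print(f"T={t}: {np.round(probs, 3)}")
    plt.plot(probs, marker="o", label=f"T={t}")

plt.xlabel("Token index")
plt.ylabel("Probability")
plt.legend()
plt.show()
```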
```python
import torch
import torch.nn.functional as F
from torch.nn import Linear, Sequential, BatchNorm1d, ReLU, Dropout
from torch_geometric.nn import GATConv
from torch_geometric.nn import global_add_pool


class GAT(torch.nn.Module):
    def __init__(self, dim_h):
        super(GAT, self).__init__()
```
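The class definition stops right after `super().__init__()`. Using the imports from the excerpt, here is a minimal sketch of how a two-layer GAT for graph classification could be completed; the added `dim_in`/`dim_out` arguments, head count, dropout, and readout MLP are assumptions for a self-contained example, not the article's exact architecture.

```python
class GAT(torch.nn.Module):
    def __init__(self, dim_in, dim_h, dim_out, heads=4):
        super().__init__()
        # Two attention layers: multi-head, then a single-head layer.
        self.gat1 = GATConv(dim_in, dim_h, heads=heads)
        self.gat2 = GATConv(dim_h * heads, dim_h, heads=1)
        # Graph-level readout MLP applied after pooling.
        self.mlp = Sequential(
            Linear(dim_h, dim_h), BatchNorm1d(dim_h), ReLU(), Dropout(0.5),
            Linear(dim_h, dim_out),
        )

    def forward(self, x, edge_index, batch):
        h = F.elu(self.gat1(x, edge_index))
        h = F.elu(self.gat2(h, edge_index))
        # Sum node embeddings per graph to get one embedding per graph.
        hg = global_add_pool(h, batch)
        return self.mlp(hg)
```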
```python
obs = env_script.reset()
done = False

# 1. Get wood with the CNN
for i in tqdm(range(3000)):
    obs = torch.from_numpy(obs['pov'].transpose(2, 0, 1)[None].astype(np.float32) / 255).cuda()
    probabilities = torch.softmax(model(obs), dim=1)[0].detach().cpu().numpy()
    action = np.random.choice(action_list, p=probabilities)
    obs, reward, done, _ = env_script.step(action)
```
```python
model = CNN((3, 64, 64), 7).cuda()
model.load_state_dict(torch.load('model.pth'))

env = gym.make('MineRLObtainDiamond-v0')
env1 = Recorder(env, './video', fps=60)
env = ActionShaping(env1)

action_list = np.arange(env.action_space.n)
obs = env.reset()
```