Note: this file may contain hidden or bidirectional Unicode characters that could be interpreted or compiled differently than they appear; open it in an editor that reveals hidden Unicode characters to review.
# BENCHMARK 2
# SECOND BENCHMARK OF ALL GENERATION METHODS
# NOTE(review): removed the trailing " | |" table artifacts left by the web
# scrape so this import block is valid Python again. No names were changed.
from transformers import AutoTokenizer, AutoModelForCausalLM, PhrasalConstraint
import torch
import time
from tqdm import tqdm
import numpy as np
import json
Note: this file may contain hidden or bidirectional Unicode characters that could be interpreted or compiled differently than they appear; open it in an editor that reveals hidden Unicode characters to review.
# Plotting/analysis snippet of the gist.
# NOTE(review): removed the trailing " | |" scrape artifacts so the imports
# parse. No names were changed.
import numpy as np
import matplotlib.pyplot as plt
import json
# NOTE(review): this definition is truncated by the scrape — the body of the
# `for k1, v1 in ...` loop and the `return out` are missing (the gist banner
# resumes immediately after), indentation was flattened, and every line carries
# a trailing " | |" table artifact. Kept byte-identical rather than guessing at
# the missing logic; reconstruct from the original gist before use.
# Presumably it loads a JSON file and rebuilds a transformed dict — TODO confirm.
def load_json(filename: str) -> dict: | |
    with open(filename, 'r') as fp: | |
        data = json.load(fp) | |
    out = {} | |
    for k1, v1 in data.items(): |
Note: this file may contain hidden or bidirectional Unicode characters that could be interpreted or compiled differently than they appear; open it in an editor that reveals hidden Unicode characters to review.
# Compiled-generation benchmark setup: loads Llama-3-8B in fp16 and its
# tokenizer onto a GPU for timing runs.
# NOTE(review): removed the trailing " | |" scrape artifacts so the script
# parses.
from transformers import AutoModelForCausalLM, AutoTokenizer, CompileConfig
import torch
import time
import warnings

# Suppress library deprecation/UX warnings that clutter benchmark output.
warnings.filterwarnings("ignore")

# Original hard-coded CUDA device index 1, which crashes with a RuntimeError on
# machines without CUDA; fall back to CPU so the script still runs (fp16 on CPU
# is slow but functional). GPU machines keep the original device 1 behavior.
device = 1 if torch.cuda.is_available() else "cpu"

# meta-llama/Meta-Llama-3-8B is a gated Hugging Face repo — requires an
# authenticated HF token with access granted; downloads ~16 GB of fp16 weights.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B", torch_dtype=torch.float16
).to(device)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")