Modified version of https://github.com/tloen/alpaca-lora/blob/5f6614e6fc8f46933b098bc47c2c18a23047f616/export_state_dict_checkpoint.py that instead converts a Hugging Face LLaMA model into the original consolidated .pth checkpoint format.
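As a minimal, self-contained sketch of the q/k weight reshuffling the script below performs, the round trip unpermute(permute(w)) should return the original matrix. The head count and hidden size here are toy values chosen only for illustration, not the real LLaMA-7B shapes used by the script:

import torch

n_heads, dim = 4, 16  # toy sizes; the script below uses 32 heads and dim 4096

def permute(w):
    return w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)

def unpermute(w):
    return w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim)

w = torch.randn(dim, dim)
assert torch.equal(unpermute(permute(w)), w)  # permute and unpermute are inverses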
import os
import json

import torch
import transformers

# Fail early if the installed transformers build predates native LLaMA support.
assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"

from transformers import LlamaTokenizer, LlamaForCausalLM
# Load the HF LLaMA tokenizer and model from ./result on CPU in fp16.
tokenizer = LlamaTokenizer.from_pretrained("./result")

base_model = LlamaForCausalLM.from_pretrained(
    "./result",
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)
# Merge weights (a leftover from the LoRA export script; for a plain HF model
# these attributes have no effect).
for layer in base_model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True

base_model.train(False)

base_model_sd = base_model.state_dict()
# Model hyperparameters written out alongside the checkpoint (LLaMA-7B values).
params = {
    "dim": 4096,
    "multiple_of": 256,
    "n_heads": 32,
    "n_layers": 32,
    "norm_eps": 1e-06,
    "vocab_size": -1,
}

n_layers = params["n_layers"]
n_heads = params["n_heads"]
dim = params["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
def permute(w):
    # HF lays out the rotary halves of the q/k projections differently from the
    # original LLaMA checkpoints; these helpers convert between the layouts
    # (unpermute inverts permute).
    return (
        w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
    )


def unpermute(w):
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim)
    )
def translate_state_dict_key(k):
    """Map an HF parameter name to its original LLaMA checkpoint name."""
    k = k.replace("base_model.model.", "")
    if k == "model.embed_tokens.weight":
        return "tok_embeddings.weight"
    elif k == "model.norm.weight":
        return "norm.weight"
    elif k == "lm_head.weight":
        return "output.weight"
    elif k.startswith("model.layers."):
        layer = k.split(".")[2]
        if k.endswith(".self_attn.q_proj.weight"):
            return f"layers.{layer}.attention.wq.weight"
        elif k.endswith(".self_attn.k_proj.weight"):
            return f"layers.{layer}.attention.wk.weight"
        elif k.endswith(".self_attn.v_proj.weight"):
            return f"layers.{layer}.attention.wv.weight"
        elif k.endswith(".self_attn.o_proj.weight"):
            return f"layers.{layer}.attention.wo.weight"
        elif k.endswith(".mlp.gate_proj.weight"):
            return f"layers.{layer}.feed_forward.w1.weight"
        elif k.endswith(".mlp.down_proj.weight"):
            return f"layers.{layer}.feed_forward.w2.weight"
        elif k.endswith(".mlp.up_proj.weight"):
            return f"layers.{layer}.feed_forward.w3.weight"
        elif k.endswith(".input_layernorm.weight"):
            return f"layers.{layer}.attention_norm.weight"
        elif k.endswith(".post_attention_layernorm.weight"):
            return f"layers.{layer}.ffn_norm.weight"
        elif k.endswith("rotary_emb.inv_freq") or "lora" in k:
            # Rotary frequency buffers and LoRA-specific tensors are dropped.
            return None
        else:
            print(layer, k)
            raise NotImplementedError
    else:
        print(k)
        raise NotImplementedError
new_state_dict = {}
for k, v in base_model_sd.items():
    new_k = translate_state_dict_key(k)
    if new_k is not None:
        if "wq" in new_k or "wk" in new_k:
            # q/k projections need the HF rotary permutation undone.
            new_state_dict[new_k] = unpermute(v)
        else:
            new_state_dict[new_k] = v

os.makedirs("./ckpt", exist_ok=True)
torch.save(new_state_dict, "./ckpt/consolidated.00.pth")

with open("./ckpt/params.json", "w") as f:
    json.dump(params, f)
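For a quick sanity check of the export, the files the script just wrote can be reloaded. This is a minimal sketch that assumes the script above has already produced ./ckpt/consolidated.00.pth and ./ckpt/params.json:

import json
import torch

# Reload the exported checkpoint and its parameter file.
sd = torch.load("./ckpt/consolidated.00.pth", map_location="cpu")
with open("./ckpt/params.json") as f:
    print(json.load(f))

# Keys should use the original LLaMA naming, e.g. "tok_embeddings.weight"
# and "layers.0.attention.wq.weight".
for name in list(sd)[:5]:
    print(name, tuple(sd[name].shape))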