Mamba PEFT fine-tuning
LoRA fine-tuning of state-spaces/mamba-130m-hf on the Abirate/english_quotes dataset using trl's SFTTrainer.
from datasets import load_dataset
from trl import SFTTrainer
from peft import LoraConfig
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments

# Load the Mamba checkpoint converted to the transformers format.
tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")
model = AutoModelForCausalLM.from_pretrained("state-spaces/mamba-130m-hf")

dataset = load_dataset("Abirate/english_quotes", split="train")

training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    logging_dir="./logs",
    logging_steps=10,
    learning_rate=2e-3,
)

# LoRA targets Mamba's linear projections (in_proj, x_proj, out_proj)
# plus the token embeddings; the rest of the model stays frozen.
lora_config = LoraConfig(
    r=8,
    target_modules=["x_proj", "embeddings", "in_proj", "out_proj"],
    task_type="CAUSAL_LM",
    bias="none",
)

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    args=training_args,
    peft_config=lora_config,
    train_dataset=dataset,
    dataset_text_field="quote",  # train on the "quote" column of the dataset
)
trainer.train()
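After training, the LoRA adapter can be saved and reloaded on top of the frozen base model for generation. A minimal sketch, assuming the adapter is written to ./results (the output_dir above); the prompt string is purely illustrative:

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

trainer.save_model("./results")  # writes the LoRA adapter weights

# Reload: base model first, then attach the trained adapter on top.
tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")
base = AutoModelForCausalLM.from_pretrained("state-spaces/mamba-130m-hf")
model = PeftModel.from_pretrained(base, "./results")

inputs = tokenizer("Life is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))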