MoinRomanticbot / fine_tune.py
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
import torch

# Base model: a GPTQ-quantized Pygmalion-7B. Loading it requires a GPTQ backend
# (e.g. auto-gptq / optimum) to be installed alongside transformers.
model_name = "TheBloke/Pygmalion-7B-GPTQ"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", trust_remote_code=True)
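
# The output directory below is named "MoinRomanticBot-Lora", but this script never
# attaches LoRA adapters, and GPTQ-quantized base weights cannot be fine-tuned
# directly in the usual way. A minimal sketch of how adapters could be attached with
# the `peft` library -- an assumption, not part of the original script; the rank and
# target modules are hypothetical choices:
#
# from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
#
# model = prepare_model_for_kbit_training(model)
# lora_config = LoraConfig(
#     r=16,
#     lora_alpha=32,
#     target_modules=["q_proj", "v_proj"],  # attention projections in LLaMA-style models
#     lora_dropout=0.05,
#     bias="none",
#     task_type="CAUSAL_LM",
# )
# model = get_peft_model(model, lora_config)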
training_args = TrainingArguments(
    output_dir="./MoinRomanticBot-Lora",
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    evaluation_strategy="steps",  # requires an eval_dataset; runs every eval_steps (defaults to logging_steps)
    save_strategy="steps",
    save_steps=100,
    logging_steps=10,
    learning_rate=5e-5,
    weight_decay=0.01,
    warmup_steps=100,
    num_train_epochs=1,
    save_total_limit=1,  # keep only the most recent checkpoint
    push_to_hub=False,
)
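
# The Trainer below needs tokenized datasets in place of the None placeholders.
# A minimal sketch, assuming a hypothetical JSON-lines corpus with a "text" field
# (the actual dataset for this bot is not specified in this snippet):
#
# from datasets import load_dataset
#
# raw = load_dataset("json", data_files={"train": "train.jsonl", "eval": "eval.jsonl"})
#
# def tokenize(batch):
#     tokens = tokenizer(batch["text"], truncation=True, max_length=512)
#     tokens["labels"] = tokens["input_ids"].copy()  # causal LM: labels mirror the inputs
#     return tokens
#
# tokenized = raw.map(tokenize, batched=True, remove_columns=["text"])
# train_dataset, eval_dataset = tokenized["train"], tokenized["eval"]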
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=None,  # TODO: replace with a tokenized training dataset (see sketch above)
    eval_dataset=None,   # TODO: replace with a tokenized evaluation dataset
)
# Training will fail until real datasets are supplied above.
trainer.train()

# Persist the fine-tuned weights and tokenizer for later loading.
model.save_pretrained("./MoinRomanticBot-Lora")
tokenizer.save_pretrained("./MoinRomanticBot-Lora")
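
# Usage sketch (assumption): reload the files exactly as the script saves them above.
#
# tokenizer = AutoTokenizer.from_pretrained("./MoinRomanticBot-Lora")
# model = AutoModelForCausalLM.from_pretrained("./MoinRomanticBot-Lora", device_map="auto")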