Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -43,8 +43,8 @@ training_args = TrainingArguments(
|
|
43 |
output_dir="./results",
|
44 |
eval_strategy="epoch",
|
45 |
learning_rate=2e-5,
|
46 |
-
per_device_train_batch_size=
|
47 |
-
per_device_eval_batch_size=
|
48 |
num_train_epochs=1,
|
49 |
weight_decay=0.01,
|
50 |
report_to="none", # Disables wandb logging
|
@@ -63,8 +63,8 @@ trainer = Trainer(
|
|
63 |
trainer.train()
|
64 |
|
65 |
# Save the model
|
66 |
-
model.save_pretrained("./fine-tuned-llama")
|
67 |
-
tokenizer.save_pretrained("./fine-tuned-llama")
|
68 |
|
69 |
# Evaluate the model
|
70 |
#results = trainer.evaluate()
|
@@ -73,7 +73,7 @@ tokenizer.save_pretrained("./fine-tuned-llama")
|
|
73 |
# Create a Gradio interface for text generation
|
74 |
def generate_text(prompt):
|
75 |
inputs = tokenizer(prompt, return_tensors="pt")
|
76 |
-
outputs = model.generate([
|
77 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
78 |
|
79 |
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
|
|
|
43 |
output_dir="./results",
|
44 |
eval_strategy="epoch",
|
45 |
learning_rate=2e-5,
|
46 |
+
per_device_train_batch_size=4, # Reduced batch size
|
47 |
+
per_device_eval_batch_size=4, # Reduced batch size
|
48 |
num_train_epochs=1,
|
49 |
weight_decay=0.01,
|
50 |
report_to="none", # Disables wandb logging
|
|
|
63 |
trainer.train()
|
64 |
|
65 |
# Save the model
|
66 |
+
model.save_pretrained("./fine-tuned-gpt2")
|
67 |
+
tokenizer.save_pretrained("./fine-tuned-gpt2")
|
68 |
|
69 |
# Evaluate the model
|
70 |
#results = trainer.evaluate()
|
|
|
73 |
# Create a Gradio interface for text generation
|
74 |
def generate_text(prompt):
|
75 |
inputs = tokenizer(prompt, return_tensors="pt")
|
76 |
+
outputs = model.generate(inputs["input_ids"], max_length=50, num_return_sequences=1)
|
77 |
return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
78 |
|
79 |
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
|