forum1 committed
Commit 7a38a21 · verified · 1 Parent(s): 69b33ab

Update app.py

Files changed (1)
  1. app.py (+51 -6)
app.py CHANGED
@@ -1,8 +1,53 @@
- from transformers import pipeline
-
- messages = [
-     {"role": "user", "content": "Who are you?"},
- ]
-
- pipe = pipeline("text-generation", model="meta-llama/Meta-Llama-3.1-405B-Instruct")
- print(pipe(messages))
+ import gradio as gr
+ from datasets import load_dataset
+ from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments
+
+ # Dataset loading (replace with your desired dataset)
+ dataset = load_dataset("meta-llama/Meta-Llama-3.1-8B-Instruct-evals", "Meta-Llama-3.1-8B-Instruct-evals__arc_challenge__details")
+
+ # Model and tokenizer (replace with desired model)
+ model_name = "mradermacher/llama-3-8b-gpt-4o-GGUF"
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Training function (optional)
+ def train_model(epochs=3):
+     training_args = TrainingArguments(
+         output_dir="output",            # Adjust output directory
+         per_device_train_batch_size=8,  # Adjust batch size
+         num_train_epochs=epochs,
+         evaluation_strategy="epoch",    # Adjust evaluation strategy
+     )
+
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         train_dataset=dataset,
+     )
+
+     trainer.train()
+     print("Model training complete!")
+
+ # Text generation function
+ def generate_text(prompt):
+     try:
+         input_ids = tokenizer(prompt, return_tensors="pt").input_ids
+         output = model.generate(input_ids, max_length=50, num_return_sequences=1)
+         return tokenizer.decode(output[0], skip_special_tokens=True)
+     except Exception as e:
+         return f"Error generating text: {e}"
+
+ # Gradio interface for text generation
+ interface = gr.Interface(
+     fn=generate_text,
+     inputs="text",
+     outputs="text",
+     title="Text Generation with Trained Model",
+     description="Enter a prompt and get creative text generated by the model.",
+ )
+
+ # Train the model before launching the interface (optional)
+ train_model()  # Comment this out to skip training before launching
+
+ # Launch the Gradio interface
+ interface.launch()
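
Note on the training path in this commit: Trainer is handed the raw DatasetDict returned by load_dataset, but it expects a single tokenized split (input_ids plus labels supplied by a collator). The sketch below shows one way the preprocessing could be wired up. It is illustrative only: the split name "latest" and the prompt column "input_final_prompts" are assumptions about this evals dataset's schema, and the GGUF repo is swapped for a standard checkpoint that AutoModelForCausalLM can load directly, which is also an assumption.

# Illustrative preprocessing sketch (not part of app.py as committed).
# Assumptions, to be verified before use:
#   - split name "latest" and prompt column "input_final_prompts"
#   - a non-GGUF base checkpoint loadable by AutoModelForCausalLM
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

base_model = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # assumed stand-in for the GGUF repo
model = AutoModelForCausalLM.from_pretrained(base_model)
tokenizer = AutoTokenizer.from_pretrained(base_model)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # causal LMs often ship without a pad token

raw = load_dataset(
    "meta-llama/Meta-Llama-3.1-8B-Instruct-evals",
    "Meta-Llama-3.1-8B-Instruct-evals__arc_challenge__details",
)

def tokenize(batch):
    # Coerce each prompt to a plain string (the column may hold lists of strings)
    texts = [" ".join(p) if isinstance(p, list) else str(p) for p in batch["input_final_prompts"]]
    return tokenizer(texts, truncation=True, max_length=512)

train_split = raw["latest"]  # assumed split name
tokenized = train_split.map(tokenize, batched=True, remove_columns=train_split.column_names)

# The collator pads each batch and derives labels for causal-LM fine-tuning
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="output", per_device_train_batch_size=8, num_train_epochs=3),
    train_dataset=tokenized,
    data_collator=collator,
)
# trainer.train()  # uncomment to run the fine-tuning pass

Whether fine-tuning on an evals dataset is meaningful is a separate question; the point here is only the shape of data Trainer expects.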