Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,25 @@
|
|
1 |
import gradio as gr
|
|
|
2 |
|
3 |
-
|
4 |
-
|
|
|
5 |
|
6 |
-
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
# Load the chat model and tokenizer.
# FIX: CodeLlama is a decoder-only (causal) LM — AutoModelForSeq2SeqLM cannot
# load it (raises at startup, matching this Space's "Build error"). Also use
# the full Hub repo id; bare "Codellama-7b-Instruct" is not a valid repo name.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("codellama/CodeLlama-7b-Instruct-hf")
tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-Instruct-hf")
# Turn a single prompt string into a model-generated reply.
def generate_response(input_text):
    """Encode the prompt, run the model's generate step, and decode the reply."""
    encoded = tokenizer(input_text, return_tensors="pt")
    generated_ids = model.generate(**encoded)
    return tokenizer.decode(generated_ids[0])
# Create the Gradio interface.
# FIX: gr.Interface takes `inputs`/`outputs` (there are no `input_type`/
# `output_type` parameters), `share` belongs to launch() rather than the
# constructor, and `enable_chat` is not an Interface argument — all three
# invalid kwargs crashed the app at startup.
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Codellama-7b-Instruct Chatbot",
    description="A chatbot powered by the Codellama-7b-Instruct model.",
    article="This chatbot is fine-tuned on a dataset of instructional text and can be used to generate responses to natural language prompts.",
    theme="default",
)

# Launch the interface on a local server; share=True also requests a public link.
interface.launch(share=True)