vijayvizag committed on
Commit 4c16e63 · verified · 1 Parent(s): 9fbde6c

Create app.py

Files changed (1)
  1. app.py +18 -0
app.py ADDED
@@ -0,0 +1,18 @@
+ import gradio as gr
+ from llama_cpp import Llama
+
+ # Initialize the Llama model with GGUF format
+ llm = Llama(model_path="./models/mistral-7b-instruct-v0.2.Q4_K_M.gguf", n_ctx=2048)
+
+ # Function to generate response
+ def generate_response(prompt):
+     output = llm(prompt, max_tokens=200, stop=["</s>"])
+     return output["choices"][0]["text"]
+
+ # Gradio interface to interact with the model
+ def chat(prompt):
+     return generate_response(prompt)
+
+ # Creating the Gradio interface
+ iface = gr.Interface(fn=chat, inputs="text", outputs="text", live=True)
+ iface.launch()
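Note: app.py loads the quantized weights from ./models/mistral-7b-instruct-v0.2.Q4_K_M.gguf, but the GGUF file itself is not part of this commit, and the script also assumes gradio and llama-cpp-python are installed. A minimal sketch of fetching the weights with huggingface_hub, assuming the TheBloke/Mistral-7B-Instruct-v0.2-GGUF repository hosts a file with that exact name:

from huggingface_hub import hf_hub_download

# Download the quantized model into ./models so that app.py's model_path resolves.
# The repo_id below is an assumption; it is not referenced anywhere in this commit.
hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
    filename="mistral-7b-instruct-v0.2.Q4_K_M.gguf",
    local_dir="./models",
)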