Ankit0802 committed on
Commit
cc7055c
·
verified ·
1 Parent(s): 4d4b4ca
Files changed (1) hide show
  1. app.py +31 -0
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from langchain.prompts import PromptTemplate
3
+ from langchain.llms import CTransformers
4
+
5
## Function to get response from llama 2 model
@st.cache_resource
def _load_llm():
    """Load the local Llama 2 GGML model once and reuse it across reruns.

    Streamlit re-executes the entire script on every user interaction;
    without caching, the multi-gigabyte model file would be re-read from
    disk on every button click. ``st.cache_resource`` keeps a single
    instance alive for the session.
    """
    return CTransformers(
        model='/workspaces/Personal-AI/llama-2-7b-chat.ggmlv3.q8_0.bin',
        model_type='llama',
        config={"max_new_tokens": 300, "temperature": 0.01},
    )


def getllamaresponse(prompt_template):
    """Generate a completion for ``prompt_template`` with the Llama 2 model.

    Parameters
    ----------
    prompt_template : str
        The raw prompt text to send to the model.

    Returns
    -------
    str
        The text generated by the model.
    """
    # Pass the prompt string to the (cached) LLama 2 model
    llm = _load_llm()
    response = llm(prompt_template)
    return response
15
+
16
# --- Page configuration ---------------------------------------------------
st.set_page_config(
    page_title='Personal AI',
    page_icon='🖥️',
    layout='centered',
    initial_sidebar_state='collapsed',
)

st.header("Personal AI 🖥️")

# --- User input -----------------------------------------------------------
prompt_template = st.text_input("Enter the Prompt Template")
submit = st.button("Generate")

# Final Response: only act once the user has clicked Generate.
if submit:
    if prompt_template:
        st.write(getllamaresponse(prompt_template))
    else:
        st.warning("Please provide a prompt template.")