m1n9k7 committed on
Commit 862c94a · 1 Parent(s): 4334e0a

Create app.py

Files changed (1)
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
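+ # Gradio chat app that serves a local quantized CodeLlama 7B model through LangChain.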
+ from langchain.llms import CTransformers
+ from langchain.chains import LLMChain
+ from langchain.prompts import PromptTemplate
+ import os
+ import io
+ import gradio as gr
+ import time
+
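+ # Prompt template that frames the user's query for the coding assistant.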
+ custom_prompt_template = """
+ You are an AI coding assistant and your task is to solve coding problems and
+ return code snippets based on the user's query. Below is the user's query.
+ Query: {query}
+ Just return the helpful code and related details.
+ Helpful code and related details:
+ """
+
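+ # Build the PromptTemplate that the chain fills in with the user's query.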
+ def set_custom_prompt():
+     prompt = PromptTemplate(
+         template=custom_prompt_template,
+         input_variables=['query']
+     )
+     return prompt
+
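+ # Load the quantized CodeLlama 7B GGUF model locally via CTransformers.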
+ def load_model():
+     llm = CTransformers(
+         model='codellama-7b.Q4_K_M.gguf',
+         model_type='llama',
+         max_new_tokens=1096,
+         temperature=0.2,
+         repetition_penalty=1.13,
+         gpu_layers=2
+     )
+     return llm
+
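+ # Combine the prompt and the model into a single LLMChain.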
+ def chain_pipeline():
+     llm = load_model()
+     qa_prompt = set_custom_prompt()
+     qa_chain = LLMChain(
+         prompt=qa_prompt,
+         llm=llm
+     )
+     return qa_chain
+
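+ # Build the chain once at module load so every request reuses it.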
+ llm_chain = chain_pipeline()
+
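+ # Run the chain on a single user query and return the generated response text.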
+ def bot(query):
+     llm_response = llm_chain.run({"query": query})
+     return llm_response
+
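+ # Gradio UI: a chat window, a textbox for queries, and a clear button.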
+ with gr.Blocks(title="code llama 7b") as demo:
+     gr.Markdown("# Code llama")
+     chatbot = gr.Chatbot([], elem_id="chatbot", height=700)
+     msg = gr.Textbox()
+     clear = gr.ClearButton([msg, chatbot])
+
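+     # Append the (user, bot) pair to the chat history and clear the textbox.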
+     def respond(message, chat_history):
+         bot_message = bot(message)
+         chat_history.append((message, bot_message))
+         time.sleep(2)
+         return "", chat_history
+
+     msg.submit(respond, [msg, chatbot], [msg, chatbot])
+
+ demo.launch(share=True)