syedmoinms committed
Commit e97a5a4 · verified · 1 Parent(s): 4451f97

Update app.py

Files changed (1)
  1. app.py +13 -2
app.py CHANGED
@@ -1,7 +1,8 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
+from memory import update_memory, check_memory
 
-# Load personality instructions
+# Load persona instructions
 with open("persona.txt", "r", encoding="utf-8") as f:
     personality = f.read()
 
@@ -10,12 +11,22 @@ model_name = "./MoinRomanticBot-Lora"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-# Function to generate response
+# Function to generate response with memory
 def chatbot(input_text):
+    # First check whether a response for this input already exists in memory
+    memory_response = check_memory(input_text)
+    if memory_response:
+        return memory_response
+
+    # Generate a new response
     prompt = f"{personality}\nMoin: {input_text}\nAI:"
     inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(**inputs, max_length=150)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Store the new response in memory
+    update_memory(input_text, response)
+
     return response
 
 # Gradio interface
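
The commit imports `check_memory` and `update_memory` from a `memory` module that is not part of this diff, so their behavior can only be inferred. A minimal sketch of what such a module could look like, assuming an exact-match cache of input/response pairs persisted to a JSON file (the module body, the `memory.json` file name, and the exact-match policy are all assumptions, not taken from the repository):

# memory.py — hypothetical sketch; the actual module is not shown in this commit.
# Assumes an exact-match cache of input -> response pairs persisted as JSON.
import json
import os

MEMORY_FILE = "memory.json"  # assumed file name

def _load_memory():
    """Read the stored input/response pairs, or start empty."""
    if os.path.exists(MEMORY_FILE):
        with open(MEMORY_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    return {}

def check_memory(input_text):
    """Return a previously stored response for this exact input, else None."""
    return _load_memory().get(input_text)

def update_memory(input_text, response):
    """Store the response under the exact input text."""
    memory = _load_memory()
    memory[input_text] = response
    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
        json.dump(memory, f, ensure_ascii=False, indent=2)

One caveat that applies regardless of how `memory` is implemented: for a causal LM, `model.generate` returns the prompt tokens together with the continuation, so the decoded `response` stored via `update_memory` would include the persona text and the `Moin:`/`AI:` scaffolding unless it is stripped before caching.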
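
The second hunk ends at the `# Gradio interface` context line, so the interface code itself falls outside this diff. Purely for orientation, a minimal single-turn wiring for the `chatbot` function might look like the following (the layout is an assumption, not taken from the commit):

# Hypothetical sketch; the actual interface code in app.py is not shown in this diff.
iface = gr.Interface(
    fn=chatbot,    # the function updated in this commit
    inputs="text",
    outputs="text",
)
iface.launch()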