SatyamD31 committed
Commit b9e5f92 · verified · 1 parent: 97a566b

Update rag.py

Files changed (1):
  1. rag.py +4 -2
rag.py CHANGED
@@ -115,7 +115,7 @@ class FinancialChatbot:
 
     def generate_answer(self, context, question):
         prompt = f"""
-        You are a financial assistant. If the user greets you (e.g., "Hello," "Hi," "Good morning"), respond politely without requiring context.
+        You are a financial assistant. If the user greets you (e.g., "Hello," "Hi," "Good morning"), respond politely with 'Hello! How can I assist you today?' without requiring context.
 
         For financial-related questions, answer based on the context provided. If the context lacks information, say "I don't know."
 
@@ -129,7 +129,9 @@ class FinancialChatbot:
         inputs = self.qwen_tokenizer.encode(input_text, return_tensors="pt")
         # outputs = self.qwen_model.generate(inputs, max_length=100)
         outputs = self.qwen_model.generate(inputs, max_new_tokens=100)
-        return self.qwen_tokenizer.decode(outputs[0], skip_special_tokens=True)
+        generated_ids = outputs[:, inputs.shape[1]:]  # Remove prompt part
+        response = self.qwen_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+        return response
 
 
     def get_answer(self, query):
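
For context, the second hunk switches from decoding the full `generate()` output to decoding only the newly generated tokens: decoder-only models such as Qwen return the prompt ids followed by the completion ids in one tensor, so slicing off the first `inputs.shape[1]` token columns leaves just the answer. Below is a minimal, self-contained sketch of that pattern against the Hugging Face `transformers` API; the model name is illustrative, not necessarily the one loaded in rag.py.

```python
# Sketch of the decode pattern introduced in this commit: strip the echoed
# prompt tokens before decoding the model's reply.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # assumption: any causal LM behaves the same way
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "You are a financial assistant.\nContext: ...\nQuestion: ...\nAnswer:"
inputs = tokenizer.encode(prompt, return_tensors="pt")

# For decoder-only models, generate() returns prompt ids + new ids in one tensor.
outputs = model.generate(inputs, max_new_tokens=100)

# Keep only the token columns after the prompt, then decode just the completion.
generated_ids = outputs[:, inputs.shape[1]:]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

Without the slice, `decode(outputs[0], skip_special_tokens=True)` returns the prompt text followed by the answer, which is why the previous version echoed the instructions back to the user.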