Update rag.py
rag.py CHANGED
@@ -112,8 +112,6 @@ class FinancialChatbot:
 
         # return response
 
-ValueError: Input length of input_ids is 127, but `max_length` is set to 100. This can lead to unexpected behavior. You should consider increasing `max_length` or, better yet, setting `max_new_tokens`.
-
 
     def generate_answer(self, context, question):
         prompt = f"""
@@ -130,7 +128,7 @@ class FinancialChatbot:
         # f"Context: {context}\nQuestion: {question}\nAnswer:"
         inputs = self.qwen_tokenizer.encode(input_text, return_tensors="pt")
         # outputs = self.qwen_model.generate(inputs, max_length=100)
-        outputs = self.qwen_model.generate(inputs, max_new_tokens=
+        outputs = self.qwen_model.generate(inputs, max_new_tokens=100)
         return self.qwen_tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
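The change itself is the swap from `max_length` to `max_new_tokens` in the `generate` call: `max_length` caps prompt plus completion, so a 127-token prompt with `max_length=100` raises the ValueError quoted in the removed line, while `max_new_tokens` caps only the newly generated tokens. A minimal, self-contained sketch of the pattern, assuming a standalone setup for illustration (the checkpoint name and placeholder prompt are assumptions; only the tokenizer/`generate` calls and the `max_new_tokens=100` fix come from the diff):

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: any causal-LM checkpoint works for this illustration; the Space's
# actual Qwen checkpoint is not shown in the diff.
model_name = "Qwen/Qwen2-0.5B-Instruct"
qwen_tokenizer = AutoTokenizer.from_pretrained(model_name)
qwen_model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "Context: ...\nQuestion: ...\nAnswer:"  # placeholder prompt
inputs = qwen_tokenizer.encode(prompt, return_tensors="pt")

# Old call: max_length counts prompt + generated tokens, so it fails as soon
# as the prompt alone exceeds 100 tokens (the ValueError above).
# outputs = qwen_model.generate(inputs, max_length=100)

# Fixed call: max_new_tokens bounds only the generated continuation,
# regardless of how long the prompt is.
outputs = qwen_model.generate(inputs, max_new_tokens=100)
print(qwen_tokenizer.decode(outputs[0], skip_special_tokens=True))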