Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -84,12 +84,14 @@ def process_query(query_text: str, vectorstore):
|
|
84 |
prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
|
85 |
prompt = prompt_template.format(context=context_text, question=query_text)
|
86 |
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
|
|
|
|
91 |
)
|
92 |
-
response_text = model.predict(prompt)
|
93 |
|
94 |
sources = list(set([doc.metadata.get("source", "") for doc, _ in results]))
|
95 |
return response_text, sources
|
|
|
84 |
prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
|
85 |
prompt = prompt_template.format(context=context_text, question=query_text)
|
86 |
|
87 |
+
# Use HuggingFaceEndpoint instead of HuggingFaceHub
|
88 |
+
model = HuggingFaceEndpoint(
|
89 |
+
repo_id="facebook/mbart-large-50", # Suitable for text2text-generation
|
90 |
+
task="text2text-generation",
|
91 |
+
model_kwargs={"temperature": 0.5, "max_length": 512},
|
92 |
+
# huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
93 |
)
|
94 |
+
response_text = model.invoke(prompt) # Use invoke instead of predict
|
95 |
|
96 |
sources = list(set([doc.metadata.get("source", "") for doc, _ in results]))
|
97 |
return response_text, sources
|