removed temp caching
app.py
CHANGED
@@ -84,7 +84,7 @@ def handle_query(query, faiss_index, embeddings_texts, model):
             prompt=f"Based on the following context:\n\n{combined_text}\n\nAnswer the following question: {query}",
             max_output_tokens=200
         )
-        generated_text = response.result
+        generated_text = response.result if response else "No response generated."
     except Exception as e:
         print(f"Error generating text: {e}")
         generated_text = "An error occurred while generating the response."
@@ -105,7 +105,7 @@ def generate_concise_response(prompt, context):
             prompt=f"{prompt}\n\nContext: {context}\n\nAnswer:",
             max_output_tokens=200
         )
-        return response.result
+        return response.result if response else "No response generated."
     except Exception as e:
         print(f"Error generating concise response: {e}")
         return "An error occurred while generating the concise response."
@@ -128,14 +128,13 @@ def chatbot(message, history):
     response, sources = handle_query(message, faiss_index, all_texts, embedding_model)
     print("Query:", message)
     print("Response:", response)
-    total_text = response
+    total_text = response if response else "No response generated."
     if sources:
         print("Sources:", sources)
         relevant_source = ""
         for source in sources:
             relevant_source += source + "\n"
         total_text += "\n\nSources:\n" + relevant_source
-
     else:
         print("Sources: None of the provided sources were used.")
     print("----")
@@ -149,7 +148,6 @@ def chatbot(message, history):
 
     return total_text
 
-
 iface = gr.ChatInterface(
     chatbot,
     title="LLM Research Assistant",
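
Every hunk applies the same fix: check the response for truthiness before dereferencing `.result` (or before using the returned string), so a generation call that comes back empty degrades to a placeholder message instead of raising an `AttributeError`. A minimal sketch of the pattern, assuming a hypothetical `generate_text` client callable whose return value may be `None` on failure; the real client call and its signature in app.py are not shown in this diff:

# Sketch only: `generate_text` is a stand-in for whatever LLM client
# app.py actually calls; its name and parameters are assumptions.
def safe_generate(generate_text, prompt: str, max_output_tokens: int = 200) -> str:
    try:
        response = generate_text(prompt=prompt, max_output_tokens=max_output_tokens)
        # Some clients return None (or a response whose .result is None)
        # for blocked or empty completions; guard before attribute access.
        return response.result if response else "No response generated."
    except Exception as e:
        # Mirror the app's behavior: log the error, return a user-facing string.
        print(f"Error generating text: {e}")
        return "An error occurred while generating the response."

The same guard appears a third time in chatbot(), where `total_text` is built from the response before the retrieved sources are appended, so a failed generation still yields a well-formed chat reply.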