UntilDot committed on
Commit
fd347d4
·
verified ·
1 Parent(s): 2b88732

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -7
app.py CHANGED
@@ -1,5 +1,5 @@
1
  from flask import Flask, render_template, request, jsonify
2
- from llm.agents import query_all_llms_sync # fixed: use the sync wrapper
3
  from llm.aggregator import aggregate_responses
4
  import os
5
  import dotenv
@@ -7,7 +7,7 @@ import dotenv
7
  # Load secrets from .env
8
  dotenv.load_dotenv()
9
 
10
- app = Flask(__name__, static_url_path='/static')
11
 
12
  @app.route("/")
13
  def index():
@@ -23,15 +23,16 @@ def chat():
23
  return jsonify({"error": "Empty prompt."}), 400
24
 
25
  try:
26
- # fixed: run the async LLM queries safely in sync context
27
- agent_outputs = query_all_llms_sync(user_input, settings)
28
 
29
- final_response = aggregate_responses(agent_outputs)
 
30
 
31
- return jsonify({"response": final_response})
32
 
33
  except Exception as e:
34
  return jsonify({"error": str(e)}), 500
35
 
36
  if __name__ == "__main__":
37
- app.run(host="0.0.0.0", port=7860, debug=False)
 
1
  from flask import Flask, render_template, request, jsonify
2
+ from llm.agents import query_all_llms
3
  from llm.aggregator import aggregate_responses
4
  import os
5
  import dotenv
 
7
  # Load secrets from .env
8
  dotenv.load_dotenv()
9
 
10
+ app = Flask(__name__)
11
 
12
  @app.route("/")
13
  def index():
 
23
  return jsonify({"error": "Empty prompt."}), 400
24
 
25
  try:
26
+ # Step 1: Query all agents asynchronously
27
+ agent_outputs = query_all_llms(user_input, settings)
28
 
29
+ # Step 2: Aggregate responses with LLM #4
30
+ final_response = aggregate_responses(agent_outputs, settings)
31
 
32
+ return jsonify({"response": f"Final synthesized response based on multiple agents:\n{final_response}"})
33
 
34
  except Exception as e:
35
  return jsonify({"error": str(e)}), 500
36
 
37
  if __name__ == "__main__":
38
+ app.run(host="0.0.0.0", port=7860, debug=False) # Hugging Face uses port 7860