UntilDot committed
Commit a50063c · verified · 1 Parent(s): fb07cb8

Upload 9 files

Files changed (3)
  1. app.py +5 -6
  2. llm/aggregator.py +1 -1
  3. routes/chat.py +18 -13
app.py CHANGED
@@ -1,5 +1,5 @@
 from flask import Flask, render_template, request, jsonify
-from llm.agents import query_all_llms
+from llm.agents import query_all_llms_sync  # fixed: use the sync wrapper
 from llm.aggregator import aggregate_responses
 import os
 import dotenv
@@ -7,7 +7,7 @@ import dotenv
 # Load secrets from .env
 dotenv.load_dotenv()
 
-app = Flask(__name__)
+app = Flask(__name__, static_url_path='/static')
 
 @app.route("/")
 def index():
@@ -23,10 +23,9 @@ def chat():
         return jsonify({"error": "Empty prompt."}), 400
 
     try:
-        # Step 1: Query all agents asynchronously
-        agent_outputs = query_all_llms(user_input, settings)
+        # fixed: run the async LLM queries safely in sync context
+        agent_outputs = query_all_llms_sync(user_input, settings)
 
-        # Step 2: Aggregate responses
         final_response = aggregate_responses(agent_outputs)
 
         return jsonify({"response": final_response})
@@ -35,4 +34,4 @@ def chat():
         return jsonify({"error": str(e)}), 500
 
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=7860, debug=False)  # Hugging Face uses port 7860
+    app.run(host="0.0.0.0", port=7860, debug=False)
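
The agents module itself is not part of this commit, so the shape of `query_all_llms_sync` is not shown. A minimal sketch of what such a wrapper could look like, assuming `query_all_llms` is a coroutine that fans out one request per configured agent:

```python
# Sketch only -- llm/agents.py is not in this diff. Assumes query_all_llms
# is an async function that queries every configured agent concurrently.
import asyncio

async def query_all_llms(user_input: str, settings: dict) -> list:
    ...  # e.g. await asyncio.gather(*per_agent_tasks)

def query_all_llms_sync(user_input: str, settings: dict) -> list:
    # asyncio.run creates and tears down a fresh event loop, which is safe
    # to call from a plain sync Flask view (no loop is already running there).
    return asyncio.run(query_all_llms(user_input, settings))
```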
llm/aggregator.py CHANGED
@@ -7,4 +7,4 @@ def aggregate_responses(responses: list) -> str:
 
     combined = "\n".join(safe_responses)
 
-    return f"Final synthesized response based on multiple agents:\n{combined}"
+    return f"Final synthesized response based on multiple agents:\n{combined}"
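
Only the tail of `aggregate_responses` appears in this hunk (the two sides are identical apart from whitespace). For reference, assuming `safe_responses` is the input list with failed or empty agent outputs filtered out earlier in the function, the visible lines just join the surviving strings:

```python
# Hypothetical call; the safe_responses filtering is not visible in this hunk.
from llm.aggregator import aggregate_responses

print(aggregate_responses(["Agent A: 4", "Agent B: 4"]))
# Final synthesized response based on multiple agents:
# Agent A: 4
# Agent B: 4
```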
routes/chat.py CHANGED
@@ -1,17 +1,22 @@
-from flask import Flask, render_template
-from routes.chat import chat_bp
-import os
-import dotenv
-
-# Load secrets from .env
-dotenv.load_dotenv()
-
-app = Flask(__name__, static_url_path='/static')
-app.register_blueprint(chat_bp)
-
-@app.route("/")
-def index():
-    return render_template("index.html")
-
-if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=7860, debug=False)  # Hugging Face uses port 7860
+from flask import Blueprint, request, jsonify
+from llm.agents import query_all_llms_sync
+from llm.aggregator import aggregate_responses
+
+chat_bp = Blueprint("chat", __name__)
+
+@chat_bp.route("/chat", methods=["POST"])
+def chat():
+    data = request.get_json()
+    user_input = data.get("prompt", "")
+    settings = data.get("settings", {})
+
+    if not user_input:
+        return jsonify({"error": "Empty prompt."}), 400
+
+    try:
+        agent_outputs = query_all_llms_sync(user_input, settings)
+        final_response = aggregate_responses(agent_outputs)
+        return jsonify({"response": final_response})
+
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
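
A quick way to exercise the new blueprint without hitting real agents is Flask's built-in test client. Note that registering `chat_bp` in the snippet below is an assumption; this commit does not show where (or whether) app.py registers it:

```python
# Smoke test for the new /chat blueprint using Flask's test client.
from flask import Flask
from routes.chat import chat_bp

app = Flask(__name__)
app.register_blueprint(chat_bp)  # assumed wiring; not shown in this commit

with app.test_client() as client:
    # The empty-prompt guard should short-circuit before any LLM calls.
    resp = client.post("/chat", json={"prompt": ""})
    print(resp.status_code, resp.get_json())  # 400 {'error': 'Empty prompt.'}

    resp = client.post("/chat", json={"prompt": "Hello", "settings": {}})
    print(resp.status_code, resp.get_json())
```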