Update app.py
app.py CHANGED
@@ -38,10 +38,8 @@ def get_weather(lat: float, lng: float) -> dict[str, str]:
     else:  # Southern locations
         return {"temperature": "30°C", "description": "Very Sunny"}
 
-
-
-# Or keep it simple, Gradio infers based on yields
-async def stream_from_agent(message: str, history: List[List[str]]) -> AsyncGenerator[str, None]:
+
+async def Answer_from_agent(message: str, history: List[List[str]]) -> AsyncGenerator[str, None]:
     """Processes message through LangChain agent, yielding intermediate steps as strings."""
 
     # Convert Gradio history to LangChain messages
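Note: this hunk renames the handler from stream_from_agent to Answer_from_agent, but the gr.ChatInterface(...) call later in the file still passes fn=stream_from_agent, so the app will fail with a NameError as soon as the module is imported. A minimal sketch of the fix, assuming the new name is the one intended to survive:

demo = gr.ChatInterface(
    fn=Answer_from_agent,  # must match the coroutine renamed above
    type="messages",
)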
@@ -59,7 +57,6 @@ async def stream_from_agent(message: str, history: List[List[str]]) -> AsyncGene
 
     lc_messages.append(HumanMessage(content=message))
 
-    # Initialize the agent (consider initializing outside the function if stateful across calls)
     llm = ChatOpenAI(temperature=0, model="gpt-4")
     memory = MemorySaver()  # Be mindful of memory state if agent is re-initialized every time
     tools = [get_lat_lng, get_weather]
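Note: the comment removed here still gives good advice: ChatOpenAI, MemorySaver, and the agent are rebuilt on every message, so the checkpointer's state is discarded each turn. A hedged sketch of hoisting the setup to module level, assuming the agent is built with LangGraph's create_react_agent (which is what the MemorySaver/astream_events combination suggests; get_lat_lng and get_weather are the tools defined earlier in this file):

from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Build the agent once at import time so MemorySaver survives across turns.
llm = ChatOpenAI(temperature=0, model="gpt-4")
memory = MemorySaver()
agent_executor = create_react_agent(llm, [get_lat_lng, get_weather], checkpointer=memory)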
@@ -69,7 +66,7 @@
     # Using a fixed one like "abc123" means all users share the same memory if server restarts aren't frequent
     thread_id = "user_session_" + str(os.urandom(4).hex())  # Example: generate unique ID
 
-    full_response = ""
+    full_response = ""
 
     async for chunk in agent_executor.astream_events(
         {"messages": lc_messages},
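Note: the full_response pair above reads identically on both sides; it is most likely an indentation-only edit the extracted view cannot show. More significantly, deriving thread_id from os.urandom on every call gives each message a brand-new memory thread, so the checkpointer never carries context between turns, while the old fixed "abc123" shared one thread across all users. A middle ground, assuming Gradio's request injection (a gr.Request parameter is filled in automatically when declared, and session_hash is stable for one browser session):

import gradio as gr

def session_thread_id(request: gr.Request) -> str:
    # One memory thread per browser session: context persists across turns
    # without being shared between unrelated visitors.
    return f"user_session_{request.session_hash}"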
@@ -80,29 +77,21 @@
         data = chunk["data"]
 
         if event == "on_chat_model_stream":
-            # Stream content from the LLM (final answer parts)
             content = data["chunk"].content
             if content:
                 full_response += content
-                yield full_response
+                yield full_response
 
         elif event == "on_tool_start":
-
-            tool_input_str = str(data.get('input', ''))  # Get tool input safely
+            tool_input_str = str(data.get('input', ''))
             yield f"🛠️ Using tool: **{data['name']}** with input: `{tool_input_str}`"
 
         elif event == "on_tool_end":
-
-            tool_output_str = str(data.get('output', ''))  # Get tool output safely
-            # Find the corresponding start message to potentially update, or just yield new message
-            # For simplicity, just yield the result as a new message line
+            tool_output_str = str(data.get('output', ''))
             yield f"Tool **{data['name']}** finished.\nResult: `{tool_output_str}`"
-            # Yield the accumulated response again after tool use in case LLM continues
             if full_response:
                 yield full_response
 
-    # Ensure the final accumulated response is yielded if not already done by the last LLM chunk
-    # (stream might end on tool end sometimes)
     if full_response and (not chunk or chunk["event"] != "on_chat_model_stream"):
         yield full_response
 
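Note: the final flush after the loop reuses the loop variable chunk. If astream_events yields no events at all, chunk is never bound, and the not-chunk guard itself raises NameError instead of protecting anything. A self-contained repro of the trap:

import asyncio

async def empty_stream():
    return
    yield  # unreachable, but makes this an async generator

async def main():
    async for chunk in empty_stream():
        pass
    print(chunk)  # NameError: name 'chunk' is not defined

asyncio.run(main())

Initializing chunk = None before the loop, or tracking the last event in a dedicated variable, avoids this.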
@@ -110,16 +99,10 @@
 # --- Gradio Interface (mostly unchanged) ---
 demo = gr.ChatInterface(
     fn=stream_from_agent,
-
-    title="
+    type="messages",
+    title="🤖 AGent template",
     description="Ask about the weather anywhere! Watch as I gather the information step by step.",
-    examples=[
-        ["What's the weather like in Tokyo?"],
-        ["Is it sunny in Paris right now?"],
-        ["Should I bring an umbrella in New York today?"]
-    ],
-    # Example icons removed for simplicity, ensure they are accessible if added back
-    cache_examples=False,  # Turn off caching initially to ensure it's not the issue
+    cache_examples=False,
     save_history=True,
     editable=True,
 )
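Note: with the newly added type="messages", Gradio delivers history as OpenAI-style role/content dicts rather than [user, bot] pairs, so the handler's history: List[List[str]] annotation no longer matches what actually arrives. The history conversion then looks roughly like the sketch below (an assumption about the elided conversion code, not a copy of it). Also, cache_examples=False survives even though the examples list itself was deleted; harmless, but there is nothing left to cache.

from langchain_core.messages import AIMessage, HumanMessage

def history_to_lc_messages(history: list[dict]) -> list:
    # With type="messages", each entry is {"role": "user" | "assistant", "content": "..."}
    lc_messages = []
    for msg in history:
        if msg["role"] == "user":
            lc_messages.append(HumanMessage(content=msg["content"]))
        else:
            lc_messages.append(AIMessage(content=msg["content"]))
    return lc_messages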
@@ -127,22 +110,9 @@ demo = gr.ChatInterface(
 if __name__ == "__main__":
     # Load environment variables
     try:
-        from dotenv import load_dotenv
-        print("Attempting to load .env file...")
-        loaded = load_dotenv()
-        if loaded:
-            print(".env file loaded successfully.")
-        else:
-            print(".env file not found or empty.")
-        # Check if the key is loaded
         openai_api_key = os.getenv("OPENAI_API_KEY")
         if openai_api_key:
             print("OPENAI_API_KEY found.")
         else:
             print("Warning: OPENAI_API_KEY not found in environment variables.")
-    except ImportError:
-        print("dotenv not installed, skipping .env load.")
-        pass
-
-    # Add server_name="0.0.0.0" if running in Docker or need external access
     demo.launch(debug=True, server_name="0.0.0.0")
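Note: this deletion removes the except ImportError arm but keeps try:, leaving a try block with no except or finally. That is a SyntaxError, so the new file will not even import. Since dotenv is gone entirely, the simplest repair is to drop the try as well; a sketch of the corrected block:

if __name__ == "__main__":
    # Check for the API key directly; dotenv loading was removed above
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if openai_api_key:
        print("OPENAI_API_KEY found.")
    else:
        print("Warning: OPENAI_API_KEY not found in environment variables.")
    demo.launch(debug=True, server_name="0.0.0.0")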