Update app.py
app.py CHANGED
@@ -1,92 +1,250 @@
Old version (removed lines marked with "-"; removed text that did not survive is elided as [...]):

 import os
 import gradio as gr
 from gradio import ChatMessage
-import requests
-from typing import Dict, List
-from langchain_core.messages import HumanMessage
 from langchain_core.tools import tool
 from langchain_openai import ChatOpenAI
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.prebuilt import create_react_agent

-#
 @tool
 def get_lat_lng(location_description: str) -> dict[str, float]:
-    """Get the latitude and longitude of a location."""
-[...]

 @tool
 def get_weather(lat: float, lng: float) -> dict[str, str]:
-    """Get the weather at a
-[...]
 demo = gr.ChatInterface(
     fn=stream_from_agent,
-[...]
     examples=[
-        "What's the weather like in Tokyo?",
-        "Is it sunny in Paris right now?",
-        "Should I bring an umbrella in New York today?"
     ],
-[...]
 )
 if __name__ == "__main__":
-[...]
New version:

import os
import gradio as gr
from gradio import ChatMessage
import requests  # Although not used in dummy tools, keep if future tools need it
from typing import Dict, List, Generator, Sequence
from langchain_core.messages import HumanMessage, BaseMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
import logging

# Configure logging for better debugging on Spaces
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Tool Definitions ---
@tool
def get_lat_lng(location_description: str) -> dict[str, float]:
    """Get the latitude and longitude of a location description (e.g., 'Paris', 'Tokyo, Japan')."""
    # In a real app, you'd call a geocoding API here.
    logger.info(f"Tool 'get_lat_lng' called with location: {location_description}")
    # Dummy response for demonstration
    if "london" in location_description.lower():
        return {"lat": 51.5074, "lng": -0.1278}
    elif "tokyo" in location_description.lower():
        return {"lat": 35.6895, "lng": 139.6917}
    elif "paris" in location_description.lower():
        return {"lat": 48.8566, "lng": 2.3522}
    elif "new york" in location_description.lower():
        return {"lat": 40.7128, "lng": -74.0060}
    else:
        # Default dummy response
        return {"lat": 51.1, "lng": -0.1}
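
# A minimal sketch of a real geocoding lookup, if you want to replace the dummy
# branches above. It assumes the free Open-Meteo geocoding endpoint (no API key
# required); the URL and response shape are assumptions, not part of this app:
#
#     resp = requests.get(
#         "https://geocoding-api.open-meteo.com/v1/search",
#         params={"name": location_description, "count": 1},
#         timeout=10,
#     )
#     hit = resp.json()["results"][0]
#     return {"lat": hit["latitude"], "lng": hit["longitude"]}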

@tool
def get_weather(lat: float, lng: float) -> dict[str, str]:
    """Get the current weather conditions at a specific latitude and longitude."""
    # In a real app, you'd call a weather API (e.g., OpenWeatherMap) here.
    logger.info(f"Tool 'get_weather' called with lat: {lat}, lng: {lng}")
    # Dummy response based on latitude for variety
    if 40 < lat < 50:  # Approx Paris/New York
        return {"temperature": "18°C", "description": "Cloudy"}
    elif lat > 50:  # Approx London
        return {"temperature": "15°C", "description": "Rainy"}
    else:  # Approx Tokyo / Default
        return {"temperature": "25°C", "description": "Sunny"}
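
# Likewise, a hedged sketch of a real lookup using Open-Meteo's forecast
# endpoint (again an assumption, not part of the original app; no API key needed):
#
#     resp = requests.get(
#         "https://api.open-meteo.com/v1/forecast",
#         params={"latitude": lat, "longitude": lng, "current_weather": True},
#         timeout=10,
#     )
#     current = resp.json()["current_weather"]
#     return {"temperature": f"{current['temperature']}°C",
#             "description": f"weather code {current['weathercode']}"}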

# --- Agent and Streaming Logic ---
def initialize_agent():
    """Initializes the LangChain agent."""
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        logger.error("OPENAI_API_KEY environment variable not set.")
        # Option 1: Raise an error to stop the app
        # raise ValueError("OpenAI API Key not found. Please set it in the Space secrets.")
        # Option 2: Return None and handle it in the stream function
        return None

    try:
        llm = ChatOpenAI(temperature=0, model="gpt-4", openai_api_key=api_key)
        # Note: MemorySaver() is in-memory. State will be lost on space restarts/sleeps.
        # For persistent memory across sessions/restarts, you'd need a persistent
        # checkpointer (e.g., Redis- or SQL-backed); see the sketch after this function.
        memory = MemorySaver()
        tools = [get_lat_lng, get_weather]
        agent_executor = create_react_agent(llm, tools, checkpointer=memory)
        logger.info("LangChain agent initialized successfully.")
        return agent_executor
    except Exception as e:
        logger.error(f"Failed to initialize LangChain agent: {e}", exc_info=True)
        return None
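
# If checkpoints must survive restarts, LangGraph ships optional persistent
# checkpointers. A sketch assuming the langgraph-checkpoint-sqlite package
# (API names per that package; verify against your installed version):
#
#     from langgraph.checkpoint.sqlite import SqliteSaver
#     memory = SqliteSaver.from_conn_string("/data/checkpoints.sqlite")
#
# On Spaces, /data only persists when persistent storage is enabled.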

# Initialize agent once when the script starts
agent_executor = initialize_agent()

# Define the streaming function for Gradio ChatInterface
def stream_from_agent(message: str, history: List[dict]) -> Generator[Sequence[ChatMessage], None, None]:
    """
    Processes user messages through the LangChain agent, yielding intermediate steps.

    Args:
        message: The user's input message.
        history: The conversation history provided by Gradio (a list of
            openai-style message dicts, since the interface uses type="messages").

    Yields:
        A sequence of Gradio ChatMessage objects representing the agent's thoughts and actions.
    """
    global agent_executor  # Use the globally initialized agent

    if agent_executor is None:
        error_msg = "Agent initialization failed. Please check the logs and ensure the OPENAI_API_KEY secret is set correctly."
        yield [ChatMessage(role="assistant", content=error_msg)]
        return

    logger.info(f"Received message: {message}")
    logger.info(f"History: {history}")

    # Convert the new user message to LangChain format.
    # Note: create_react_agent expects a list of BaseMessages under the "messages" key,
    # and works best with a single HumanMessage per turn for the ReAct loop; the
    # memory checkpointer handles history persistence within the agent's context.
    langchain_message = HumanMessage(content=message)

    messages_to_display: List[ChatMessage] = []
    final_response_content = ""

    try:
        # Note: a fixed thread_id means all users share the same memory state with MemorySaver.
        # Isolated user sessions would need a unique thread_id per user/session, which
        # usually means integrating with Gradio's session handling (see the sketch below).
        # For simplicity, this app uses a fixed ID.
        thread_id = "shared_weather_thread_123"
        config = {"configurable": {"thread_id": thread_id}}
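
        # A hedged sketch of per-session isolation (an assumption, untested here):
        # give stream_from_agent an extra parameter annotated as gr.Request, which
        # Gradio injects, and derive the thread id from the session, e.g.:
        #
        #     def stream_from_agent(message, history, request: gr.Request):
        #         thread_id = request.session_hash or "anonymous"
        #         config = {"configurable": {"thread_id": thread_id}}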

        # Stream the agent's execution steps
        for chunk in agent_executor.stream({"messages": [langchain_message]}, config=config):
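            # Each streamed update is keyed by the LangGraph node that produced it:
            # with create_react_agent that is "agent" (LLM output, possibly carrying
            # tool_calls) and "tools" (ToolMessage results), e.g.
            # {"agent": {"messages": [AIMessage(...)]}}. (Assumed from current
            # langgraph streaming behavior; log a chunk to confirm for your version.)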
            logger.debug(f"Agent chunk received: {chunk}")  # Use debug level for verbose chunk logging

            # Check for Agent Actions (Tool Calls)
            if agent_action := chunk.get("agent"):
                # The agent's rationale or decision to use a tool usually arrives here;
                # depending on the specific agent type, you might parse agent_action differently
                if agent_action.get("messages"):
                    for msg in agent_action["messages"]:
                        if hasattr(msg, 'tool_calls') and msg.tool_calls:
                            for tool_call in msg.tool_calls:
                                # Display the tool call intention
                                tool_msg = ChatMessage(
                                    role="assistant",  # Show tool usage as assistant action
                                    content=f"Parameters: `{tool_call['args']}`",
                                    metadata={
                                        "title": f"🛠️ Calling Tool: `{tool_call['name']}`",
                                        "tool_call_id": tool_call["id"],  # Store ID to match response
                                    }
                                )
                                messages_to_display.append(tool_msg)
                                yield messages_to_display
                        # Capture potential intermediate reasoning if available (depends on agent/LLM)
                        elif hasattr(msg, 'content') and isinstance(msg.content, str) and msg.content:
                            # Avoid displaying the *final* answer prematurely if it appears mid-stream;
                            # the final answer is usually in the last chunk's 'agent' message list
                            pass  # We'll handle the final answer specifically later

            # Check for Tool Execution Results
            if tool_chunk := chunk.get("tools"):
                if tool_chunk.get("messages"):
                    for tool_response in tool_chunk["messages"]:
                        # Find the corresponding tool call message to update it
                        found = False
                        for i, msg in enumerate(messages_to_display):
                            if msg.metadata and msg.metadata.get("tool_call_id") == tool_response.tool_call_id:
                                # Update the existing tool message with the result
                                updated_content = msg.content + f"\nResult: `{tool_response.content}`"
                                messages_to_display[i] = ChatMessage(
                                    role=msg.role,
                                    content=updated_content,
                                    metadata=msg.metadata  # Keep original metadata
                                )
                                found = True
                                break
                        if found:
                            yield messages_to_display
                        else:
                            # If a matching call is not found (shouldn't happen often), display it
                            # separately. Gradio's Chatbot does not accept a "tool" role, so
                            # show the result as an assistant message.
                            tool_result_msg = ChatMessage(
                                role="assistant",
                                content=f"Tool Result (`{tool_response.tool_call_id}`): `{tool_response.content}`"
                            )
                            messages_to_display.append(tool_result_msg)
                            yield messages_to_display

            # Check for the Final Agent Response
            # The final answer is typically the last message in the 'agent' chunk's list
            if agent_final := chunk.get("agent"):
                if agent_final.get("messages"):
                    last_message = agent_final["messages"][-1]
                    # Ensure it's the final response (i.e., not a tool call)
                    if hasattr(last_message, 'content') and not (hasattr(last_message, 'tool_calls') and last_message.tool_calls):
                        final_response_content = last_message.content

        # After the loop, ensure the final response is added if it hasn't been displayed yet
        if final_response_content:
            # Check if the last displayed message is already the final response
            is_already_displayed = False
            if messages_to_display:
                last_displayed = messages_to_display[-1]
                # Simple check: the last displayed message has no tool metadata and its content matches
                if not (last_displayed.metadata and "tool_call_id" in last_displayed.metadata) and last_displayed.content == final_response_content:
                    is_already_displayed = True

            if not is_already_displayed:
                final_msg = ChatMessage(role="assistant", content=final_response_content)
                messages_to_display.append(final_msg)
                yield messages_to_display
        elif not messages_to_display:
            # Handle cases where the agent didn't produce a final response (e.g., errors)
            yield [ChatMessage(role="assistant", content="Sorry, I couldn't process that request.")]

    except Exception as e:
        logger.error(f"Error during agent stream: {e}", exc_info=True)
        error_message = f"An error occurred: {e}"
        yield [ChatMessage(role="assistant", content=error_message)]

# --- Gradio Interface Definition ---
# Use gr.ChatInterface with type="messages" for full ChatMessage object support
demo = gr.ChatInterface(
    fn=stream_from_agent,
    # type="messages" lets the function yield gr.ChatMessage objects (needed for
    # the metadata tool-call titles above); in this mode the history argument is
    # a list of openai-style message dicts rather than [user, assistant] pairs.
    type="messages",
    chatbot=gr.Chatbot(
        type="messages",  # Match the interface's message format
        bubble_full_width=False,
        show_copy_button=True,
    ),
    textbox=gr.Textbox(label="Ask the weather assistant"),  # Customize the input textbox
    title="🌤️ Weather Assistant with LangGraph ReAct Agent",
    description="Ask about the weather anywhere! Watch the agent think step-by-step as it uses tools.",
    examples=[
        ["What's the weather like in Tokyo?"],
        ["Is it sunny in Paris right now?"],
        ["Should I bring an umbrella in New York today?"]
    ],
    cache_examples=False,  # Don't pre-run the agent on the examples
    theme="soft",  # Optional: Apply a theme
    retry_btn=None,  # Disable retry button since the stream handles errors itself
    undo_btn="Delete Previous",  # Customize undo button text
    clear_btn="Clear Conversation",  # Customize clear button text
)

# --- Launch the App ---
if __name__ == "__main__":
    # Launch the Gradio app.
    # share=False is the default and recommended on Spaces.
    # debug=True can help during development; disable it for production.
    # server_name="0.0.0.0" binds all interfaces so the Space's proxy can reach the server.
    demo.launch(server_name="0.0.0.0", server_port=7860)
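
# Assumed Space dependencies, inferred from the imports above (not part of the
# diff; pin versions in requirements.txt as appropriate):
#   gradio, langchain-openai, langgraph, requests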