Spaces:
Running
Running
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
from gradio import ChatMessage
|
4 |
+
import requests
|
5 |
+
from typing import Dict, List
|
6 |
+
from langchain_core.messages import HumanMessage
|
7 |
+
from langchain_core.tools import tool
|
8 |
+
from langchain_openai import ChatOpenAI
|
9 |
+
from langgraph.checkpoint.memory import MemorySaver
|
10 |
+
from langgraph.prebuilt import create_react_agent
|
11 |
+
|
12 |
+
# Weather and location tools
@tool
def get_lat_lng(location_description: str) -> dict[str, float]:
    """Get the latitude and longitude of a location.

    Geocodes *location_description* via geocode.maps.co. When GEO_API_KEY is
    not set, returns dummy London coordinates so the app stays usable without
    credentials.

    Returns:
        dict with "lat" and "lng" float keys.

    Raises:
        ValueError: if the geocoding service returns no match.
        requests.HTTPError: on a non-2xx response.
    """
    if os.getenv("GEO_API_KEY") is None:
        return {"lat": 51.1, "lng": -0.1}  # London coordinates as dummy response

    params = {
        "q": location_description,
        "api_key": os.getenv("GEO_API_KEY"),
    }

    # Explicit timeout: without it, requests can block forever and freeze
    # the chat handler if the geocoding service stalls.
    r = requests.get("https://geocode.maps.co/search", params=params, timeout=10)
    r.raise_for_status()
    data = r.json()

    # Guard clause: an empty result list means the location was not found.
    if not data:
        raise ValueError("Could not find the location")
    return {"lat": float(data[0]["lat"]), "lng": float(data[0]["lon"])}
|
32 |
+
|
33 |
+
@tool
def get_weather(lat: float, lng: float) -> dict[str, str]:
    """Get the weather at a location.

    Queries the tomorrow.io realtime endpoint. When WEATHER_API_KEY is not
    set, returns a dummy "Sunny, 21°C" response so the app stays usable
    without credentials.

    Returns:
        dict with "temperature" (e.g. "21°C") and "description" keys.

    Raises:
        requests.HTTPError: on a non-2xx response.
    """
    if not os.getenv("WEATHER_API_KEY"):
        return {"temperature": "21°C", "description": "Sunny"}  # Dummy response

    params = {
        "apikey": os.getenv("WEATHER_API_KEY"),
        "location": f"{lat},{lng}",
        "units": "metric",
    }

    # Explicit timeout: without it, requests can block forever and freeze
    # the chat handler if the weather service stalls.
    r = requests.get(
        "https://api.tomorrow.io/v4/weather/realtime", params=params, timeout=10
    )
    r.raise_for_status()
    data = r.json()

    values = data["data"]["values"]
    # Mapping of tomorrow.io numeric weather codes to human-readable text.
    weather_codes = {
        1000: "Clear, Sunny", 1100: "Mostly Clear", 1101: "Partly Cloudy",
        1102: "Mostly Cloudy", 1001: "Cloudy", 2000: "Fog",
        2100: "Light Fog", 4000: "Drizzle", 4001: "Rain",
        4200: "Light Rain", 4201: "Heavy Rain", 5000: "Snow",
        5001: "Flurries", 5100: "Light Snow", 5101: "Heavy Snow",
        6000: "Freezing Drizzle", 6001: "Freezing Rain",
        6200: "Light Freezing Rain", 6201: "Heavy Freezing Rain",
        7000: "Ice Pellets", 7101: "Heavy Ice Pellets",
        7102: "Light Ice Pellets", 8000: "Thunderstorm"
    }

    return {
        "temperature": f'{values["temperatureApparent"]:0.0f}°C',
        "description": weather_codes.get(values["weatherCode"], "Unknown")
    }
|
66 |
+
|
67 |
+
def stream_from_agent(message: str, history: List[Dict[str, str]]) -> gr.ChatMessage:
    """Process messages through the LangChain agent with visible reasoning.

    Generator handler for gr.ChatInterface (type="messages"): yields a
    growing list of ChatMessage objects — one "pending" message per tool
    call, updated with the tool's result, followed by the final answer.

    Args:
        message: the user's new message.
        history: prior turns as [{"role": ..., "content": ...}, ...].

    Yields:
        list[ChatMessage]: the messages to display so far.
    """
    # Initialize the agent. NOTE(review): rebuilt on every call, so the
    # MemorySaver checkpoint is fresh each turn; context is carried by
    # replaying `history` instead.
    llm = ChatOpenAI(temperature=0, model="gpt-4")
    memory = MemorySaver()
    tools = [get_lat_lng, get_weather]
    agent_executor = create_react_agent(llm, tools, checkpointer=memory)

    # Replay prior user turns first, THEN the new message, so the agent sees
    # the conversation in chronological order. (The original prepended the
    # new message before the history, inverting the order.)
    past_messages = [
        HumanMessage(content=h["content"]) for h in history if h["role"] == "user"
    ]
    past_messages.append(HumanMessage(content=message))

    messages_to_display = []
    final_response = None

    for chunk in agent_executor.stream(
        {"messages": past_messages},
        config={"configurable": {"thread_id": "abc123"}}
    ):
        # Handle agent's actions and tool usage
        if chunk.get("agent"):
            for msg in chunk["agent"]["messages"]:
                if msg.content:
                    final_response = msg.content

                # Handle tool calls OUTSIDE the content check: agent messages
                # that request tools typically have empty content, so gating
                # on `msg.content` (as the original did) would skip them.
                for tool_call in msg.tool_calls:
                    tool_message = ChatMessage(
                        content=f"Parameters: {tool_call['args']}",
                        metadata={
                            "title": f"🛠️ Using {tool_call['name']}",
                            "id": tool_call["id"],
                            "status": "pending",
                        }
                    )
                    messages_to_display.append(tool_message)
                    yield messages_to_display
                    tool_message.metadata["status"] = "done"

        # Handle tool responses
        if chunk.get("tools"):
            for tool_response in chunk["tools"]["messages"]:
                # Find the corresponding tool message by tool_call_id and
                # append the result to its displayed content.
                for msg in messages_to_display:
                    if msg.metadata.get("id") == tool_response.tool_call_id:
                        msg.content += f"\nResult: {tool_response.content}"
                        yield messages_to_display

    # Add the final response as a regular message
    if final_response:
        messages_to_display.append(ChatMessage(content=final_response))
        yield messages_to_display
|
122 |
+
|
123 |
+
# Create the Gradio interface. type="messages" makes the handler receive and
# yield openai-style {"role", "content"} dicts / ChatMessage objects.
# (Dead commented-out retry_btn/undo_btn/clear_btn kwargs removed: those
# parameters no longer exist on gr.ChatInterface.)
demo = gr.ChatInterface(
    fn=stream_from_agent,
    type="messages",
    title="🌤️ Weather Assistant",
    description="Ask about the weather anywhere! Watch as I gather the information step by step.",
    examples=[
        "What's the weather like in Tokyo?",
        "Is it sunny in Paris right now?",
        "Should I bring an umbrella in New York today?"
    ],
)
|
138 |
+
|
139 |
+
if __name__ == "__main__":
    # Best-effort .env loading: keys may equally come from the real
    # environment, so a missing python-dotenv is not an error.
    try:
        from dotenv import load_dotenv
    except ImportError:
        pass
    else:
        load_dotenv()

    demo.launch(debug=True)
|