Commit ea45d2d · apple muncy committed · 1 Parent: 3b081a7

replaced app.py

Signed-off-by: apple muncy <[email protected]>

app.py CHANGED
@@ -6,31 +6,19 @@ from langgraph.graph import START, StateGraph
 from langgraph.prebuilt import tools_condition
 from langchain_ollama.chat_models import ChatOllama
 
-from tools import search_tool, weather_info_tool, hub_stats_tool
-
 
+from tools import search_tool, weather_info_tool, hub_stats_tool
 import gradio as gr
-
-
-from smolagents import GradioUI, CodeAgent, HfApiModel
-
-
+from smolagents import GradioUI, LiteLLMModel
 from retriever import load_guest_dataset
-
-from dotenv import load_dotenv
 import yaml
-load_dotenv
-
-# Import our custom tools from their modules
-from retriever import load_guest_dataset
+from dotenv import load_dotenv
 
+load_dotenv()
 
 # Import our custom tools from their modules
 from retriever import load_guest_dataset
 
-model = HfApiModel()
-guest_info_tool = load_guest_dataset()
-
 # Load prompts from YAML file
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
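Reviewer note on the hunk above: the old file referenced load_dotenv bare, without parentheses, which evaluates the function object and discards it without ever reading .env; the new file actually calls it. A minimal sketch of the corrected pattern, assuming a .env file sits next to app.py:

    from dotenv import load_dotenv

    # Reads key=value pairs from .env into os.environ so later code
    # (API clients, tools) can pick them up.
    load_dotenv()

Note also that "from retriever import load_guest_dataset" still appears twice in the new file (new lines 13 and 20); the second import is redundant but harmless.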
@@ -39,7 +27,7 @@ with open("prompts.yaml", 'r') as stream:
 system_prompt = prompt_templates["system_prompt"]
 
 # Initialize the chat model
-chat = ChatOllama(model="qwen2:7b",
+chat = ChatOllama(model="qwen2:7b",
                   verbose=True)
 
 # Define available tools
@@ -52,6 +40,7 @@ tools = [
 
 # Bind tools to the chat model
 chat_with_tools = chat.bind_tools(tools)
+
 # Generate the AgentState and Agent graph
 class AgentState(TypedDict):
     messages: Annotated[list[AnyMessage], add_messages]
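For context, here is how the pieces from the two hunks above fit together once the tool list (visible only in this hunk's header, "tools = [") is filled in. The model name and the bind_tools call come from the diff; the tool list contents are an assumption based on the file's imports, and the paired ChatOllama lines in the previous hunk render identically, so that change is apparently whitespace-only:

    from langchain_ollama.chat_models import ChatOllama

    # Assumed from the file's imports; the actual list is outside the diff context.
    tools = [search_tool, weather_info_tool, hub_stats_tool]

    # Requires a local Ollama server with the model pulled: ollama pull qwen2:7b
    chat = ChatOllama(model="qwen2:7b", verbose=True)

    # bind_tools attaches the tools' schemas to each request so the model can
    # emit tool calls; it does not execute them itself. Execution is the
    # ToolNode's job in the graph below.
    chat_with_tools = chat.bind_tools(tools)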
@@ -63,18 +52,17 @@ def assistant(state: AgentState):
         messages = [SystemMessage(content=system_prompt)] + state["messages"]
     else:
         messages = state["messages"]
-
+
     return {
         "messages": [chat_with_tools.invoke(messages)],
    }
 
-
 ## The graph
 builder = StateGraph(AgentState)
 
 # Define nodes: these do the work
 builder.add_node("assistant", assistant)
-builder.add_node("tools",ToolNode(tools))
+builder.add_node("tools", ToolNode(tools))
 
 # Define edges: these determine how the control flow moves
 builder.add_edge(START, "assistant")
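The hunk above stops at the START edge; the conditional routing implied by the tools_condition import sits outside the diff context. A sketch of the usual completion (the compiled-graph variable name is assumed, not shown in the diff):

    # Route to the "tools" node when the assistant's last message contains
    # tool calls; otherwise end the run.
    builder.add_conditional_edges("assistant", tools_condition)

    # Feed tool results back to the assistant for the next step.
    builder.add_edge("tools", "assistant")

    graph = builder.compile()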
@@ -102,7 +90,7 @@ def chat_fn(message, history):
     history.append((message, response))
     return history, ""
 
-
+
 # Create Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("### LangGraph Chat with Gradio")
@@ -113,6 +101,5 @@ with gr.Blocks() as demo:
     # Hook the send button
     send_btn.click(fn=chat_fn, inputs=[msg, chatbot], outputs=[chatbot, msg])
 
-
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
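Only the tail of chat_fn is visible in the diff. A sketch of a full handler consistent with those two lines, where the compiled-graph name and the message construction are assumptions:

    from langchain_core.messages import HumanMessage

    def chat_fn(message, history):
        # Run the compiled graph on the user's message; "graph" is the
        # compiled StateGraph sketched above (name not shown in the diff).
        result = graph.invoke({"messages": [HumanMessage(content=message)]})
        response = result["messages"][-1].content
        history.append((message, response))
        return history, ""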