Update app.py
app.py CHANGED
@@ -10,12 +10,16 @@ from langchain_community.retrievers import PineconeHybridSearchRetriever
 from langchain.tools.retriever import create_retriever_tool
 from langgraph.prebuilt import create_react_agent
 
-# Download the NLTK tokenizer if not already downloaded
+# Download the NLTK tokenizer (if not already downloaded)
 nltk.download('punkt_tab')
 
 
 @st.cache_resource
-def init_agent():
+def init_agent(namespace1: str, namespace2: str):
+    """
+    Initialize the LangGraph agent with two Pinecone retriever tools,
+    each configured with a different namespace.
+    """
     # Retrieve API keys from environment variables
     OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
     PINE_API_KEY = os.environ.get("PINE_API_KEY")
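
A note on this hunk: because `init_agent` is wrapped in `@st.cache_resource`, Streamlit keys the cached agent on the new `(namespace1, namespace2)` argument pair, so the agent is rebuilt once per distinct pair and reused across reruns. A minimal sketch of that caching behavior, with a stand-in body:

import streamlit as st

@st.cache_resource
def init_agent(namespace1: str, namespace2: str):
    # Heavy setup would run here, once per distinct (namespace1, namespace2) pair.
    return {"namespaces": (namespace1, namespace2)}  # stand-in for the compiled agent

a = init_agent("langgraph-main", "autogen")
b = init_agent("langgraph-main", "autogen")
assert a is b  # repeated calls with the same arguments return the same cached object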
@@ -31,43 +35,56 @@ def init_agent():
 
     # --- Pinecone Setup ---
     index_name = 'autogen'
-    namespace_name = 'langgraph-main'
     pc = Pinecone(api_key=PINE_API_KEY)
     index = pc.Index(index_name)
-    # Allow […]
+    # Allow the index to connect
     time.sleep(1)
     index.describe_index_stats()
 
     # --- BM25 Sparse Encoder ---
     bm25_encoder = BM25Encoder().default()
 
-    # --- Create […]
-    retriever = PineconeHybridSearchRetriever(
+    # --- Create first Pinecone Hybrid Retriever Tool using namespace1 ---
+    retriever1 = PineconeHybridSearchRetriever(
+        embeddings=embed,
+        sparse_encoder=bm25_encoder,
+        index=index,
+        namespace=namespace1,
+        top_k=4
+    )
+    retriever_tool1 = create_retriever_tool(
+        retriever1,
+        "retrieve_context_1",
+        f"Search and return information from Autogen's codebase and documentation using namespace '{namespace1}'.",
+    )
+
+    # --- Create second (duplicate) Pinecone Hybrid Retriever Tool using namespace2 ---
+    retriever2 = PineconeHybridSearchRetriever(
         embeddings=embed,
         sparse_encoder=bm25_encoder,
         index=index,
-        namespace=namespace_name,
+        namespace=namespace2,
         top_k=4
     )
+    retriever_tool2 = create_retriever_tool(
+        retriever2,
+        "retrieve_context_2",
+        f"Search and return information from Autogen's codebase and documentation using namespace '{namespace2}'.",
+    )
+
+    # Both retriever tools are added to the list of available tools.
+    tools = [retriever_tool1, retriever_tool2]
 
     # --- Chat Model ---
     model = ChatOpenAI(model_name="o3-mini-2025-01-31", openai_api_key=OPENAI_API_KEY)
 
-    # --- Create the Retriever Tool ---
-    retriever_tool = create_retriever_tool(
-        retriever,
-        "retrieve_context",
-        "Search and return information form Autogen's codebase and documentation",
-    )
-    tools = [retriever_tool]
-
     # --- System Prompt ---
     prompt = """
 You are an AI coding assistant specializing in the LangGraph framework. Your primary role is to help users build, code, and debug their LangGraph graphs for multi-agent AI applications. Focus on guiding users through the actual coding and implementation of LangGraph graphs rather than merely answering theoretical questions. Your responses should empower users to write, test, and optimize their LangGraph code by leveraging documentation, source code, and practical coding examples.
 
-You have access to […]
+You have access to two powerful tools called `retrieve_context_1` and `retrieve_context_2` that function as search engines for LangGraph’s resources. These tools are essential for retrieving up-to-date code examples, API references, and implementation details to ensure that your responses reflect the latest details from LangGraph. Use them extensively to fetch relevant coding resources when necessary.
 
-When using the `retriever_tool`, formulate your search queries with these key terms:
+When using these retriever tools, formulate your search queries with these key terms:
 - **Graph coding**: for guidance on building and structuring LangGraph graphs.
 - **Nodes implementation**: for creating, managing, and customizing workflow nodes in code.
 - **Multi-agent graph workflows**: for coding interactions and collaborations among agents.
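
Both retrievers in this hunk share the same Pinecone index and BM25 encoder and differ only in `namespace`. Each one can be smoke-tested on its own before being wrapped in a tool; a sketch assuming the `embed`, `bm25_encoder`, and `index` objects built earlier in app.py (the query string is illustrative):

from langchain_community.retrievers import PineconeHybridSearchRetriever

retriever = PineconeHybridSearchRetriever(
    embeddings=embed,             # dense embedding model defined earlier in app.py
    sparse_encoder=bm25_encoder,  # BM25 sparse encoder built above
    index=index,                  # Pinecone index handle built above
    namespace="langgraph-main",
    top_k=4,
)
# Retrievers are Runnables, so invoke() returns a list of Documents.
docs = retriever.invoke("how to add conditional edges to a StateGraph")
for doc in docs:
    print(doc.page_content[:120])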
@@ -86,7 +103,7 @@ When using the `retriever_tool`, formulate your search queries with these key terms:
 When responding to user queries:
 1. **Focus on coding**: Prioritize providing code examples, step-by-step coding instructions, and debugging tips related to building LangGraph graphs.
 2. **Begin** by understanding the specific coding challenge or feature the user wants to implement.
-3. **Search** for relevant coding examples or API details using the `retriever_tool` if necessary.
+3. **Search** for relevant coding examples or API details using the retriever tools if necessary.
 4. **Provide** clear, concise, and accurate code snippets, including explanations for each part of the code.
 5. **Explain** technical concepts in a way that is accessible to developers who are implementing LangGraph graphs.
 6. **Suggest** best practices, testing strategies, and debugging techniques for the user’s code.
@@ -100,31 +117,35 @@
 
 If a user’s query is unclear or falls outside the direct scope of coding LangGraph graphs, politely ask for clarification or guide them towards more appropriate resources.
 
-Always use the […]
+Always use the retriever tools frequently—even for queries you are confident about—since LangGraph’s coding resources are continuously updated.
 
 Now, please help the user with their coding query for LangGraph:
 """
 
-    # --- Create the React Agent ---
+    # --- Create the React Agent using both tools ---
     graph = create_react_agent(model, tools=tools, messages_modifier=prompt)
     return graph
 
 
-# […]
-graph = init_agent()
+# ----------------- Sidebar: Namespace Selection ----------------- #
+st.sidebar.header("Namespace Selection for Retriever Tools")
+namespace_options = ["langgraph-main", "autogen"]
+namespace1 = st.sidebar.selectbox("Select namespace for Retriever Tool 1:", namespace_options, index=0)
+namespace2 = st.sidebar.selectbox("Select namespace for Retriever Tool 2:", namespace_options, index=0)
+
+# Initialize the agent with the selected namespaces.
+graph = init_agent(namespace1, namespace2)
 
-##########################################
-# Streamlit Chat App UI
-##########################################
 
+# ----------------- Main Chat App UI ----------------- #
 st.title("LangGraph Coding Chat Assistant")
 
 # Initialize conversation history in session state
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = [] # Each entry is a tuple: (role, message)
 
-# Function to display the conversation
 def display_conversation():
+    """Display the chat history."""
     for role, message in st.session_state.chat_history:
         if role == "user":
             st.markdown(f"**You:** {message}")
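
One consequence of this hunk: both selectboxes default to the same option (`index=0`), so the two retriever tools can end up searching the identical namespace. If that matters, a possible guard, not part of this commit, is a sidebar warning placed right after the selectboxes (it assumes the `namespace1` and `namespace2` variables above):

if namespace1 == namespace2:
    st.sidebar.warning("Both retriever tools will search the same namespace.")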
@@ -146,23 +167,21 @@ with st.form("chat_form", clear_on_submit=True):
 if st.session_state.chat_history and st.session_state.chat_history[-1][0] == "user":
     inputs = {"messages": st.session_state.chat_history}
 
-    # Placeholder […]
+    # Placeholder to update the response stream in real time.
     response_placeholder = st.empty()
     assistant_message = ""
 
-    # Stream the agent's response
+    # Stream the agent's response chunk-by-chunk.
    for s in graph.stream(inputs, stream_mode="values"):
-        # Extract the last message from the messages list
+        # Extract the last message from the messages list.
         message = s["messages"][-1]
         if isinstance(message, tuple):
-            # If the message is a tuple like ("assistant", text)
             role, text = message
         else:
-            # Otherwise, assume it has a 'content' attribute.
             text = message.content
         assistant_message += text
         response_placeholder.markdown(f"**Assistant:** {assistant_message}")
 
-    # Append the full response to the chat history
+    # Append the full assistant response to the chat history.
     st.session_state.chat_history.append(("assistant", assistant_message))
     st.experimental_rerun()
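
A caveat on the streaming loop in the last hunk: with `stream_mode="values"`, each item yielded by `graph.stream` is a full state snapshot, so `s["messages"][-1]` may be the echoed user turn or a tool result, and its `content` is already a complete message rather than a token delta; concatenating every snapshot can therefore duplicate text in the placeholder. A sketch of a variant that renders only finished assistant messages (an alternative, not what the commit does; it assumes the `graph`, `inputs`, and `response_placeholder` objects above):

from langchain_core.messages import AIMessage

assistant_message = ""
for s in graph.stream(inputs, stream_mode="values"):
    message = s["messages"][-1]
    # Each snapshot carries whole messages; render only completed AI turns.
    if isinstance(message, AIMessage) and message.content:
        assistant_message = message.content
        response_placeholder.markdown(f"**Assistant:** {assistant_message}")

Separately, newer Streamlit releases replace `st.experimental_rerun()` with `st.rerun()`.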