thomfoolery committed on
Commit 6b8b230 · 1 Parent(s): b6b104f

chainlit app with multiple tools

Files changed (13)
  1. .chainlit/config.toml +84 -0
  2. .eslintrc.cjs +84 -0
  3. .gitignore +7 -0
  4. .python-version +1 -0
  5. Dockerfile +24 -9
  6. README.md +41 -0
  7. app.py +13 -3
  8. chainlit.md +14 -0
  9. chainlit.py +126 -0
  10. graph.py +77 -0
  11. pyproject.toml +17 -0
  12. requirements.txt +0 -2
  13. uv.lock +0 -0
.chainlit/config.toml ADDED
@@ -0,0 +1,84 @@
+ [project]
+ # Whether to enable telemetry (default: true). No personal data is collected.
+ enable_telemetry = true
+
+ # List of environment variables to be provided by each user to use the app.
+ user_env = []
+
+ # Duration (in seconds) during which the session is saved when the connection is lost
+ session_timeout = 3600
+
+ # Enable third parties caching (e.g. LangChain cache)
+ cache = false
+
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+ # follow_symlink = false
+
+ [features]
+ # Show the prompt playground
+ prompt_playground = true
+
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+ unsafe_allow_html = false
+
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
+ latex = false
+
+ # Authorize users to upload files with messages
+ multi_modal = true
+
+ # Allows user to use speech to text
+ [features.speech_to_text]
+ enabled = false
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
+ # language = "en-US"
+
+ [UI]
+ # Name of the app and chatbot.
+ name = "Chatbot"
+
+ # Show the readme while the conversation is empty.
+ show_readme_as_default = true
+
+ # Description of the app and chatbot. This is used for HTML tags.
+ # description = ""
+
+ # Large size content are by default collapsed for a cleaner ui
+ default_collapse_content = true
+
+ # The default value for the expand messages settings.
+ default_expand_messages = false
+
+ # Hide the chain of thought details from the user in the UI.
+ hide_cot = false
+
+ # Link to your github repo. This will add a github button in the UI's header.
+ # github = ""
+
+ # Specify a CSS file that can be used to customize the user interface.
+ # The CSS file can be served from the public directory or via an external link.
+ # custom_css = "/public/test.css"
+
+ # Override default MUI light theme. (Check theme.ts)
+ [UI.theme.light]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.light.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+ # Override default MUI dark theme. (Check theme.ts)
+ [UI.theme.dark]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.dark.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+ [meta]
+ generated_by = "0.7.700"
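A quick way to double-check what Chainlit will read from this file is to parse it with the standard library. A minimal sketch, assuming Python 3.11+ for `tomllib` (the repo pins 3.12 below); Chainlit itself loads this file automatically at startup:

```python
# Sanity-check the generated .chainlit/config.toml with stdlib tomllib.
import tomllib

with open(".chainlit/config.toml", "rb") as f:
    config = tomllib.load(f)

print(config["project"]["session_timeout"])  # 3600
print(config["UI"]["name"])                  # Chatbot
print(config["features"]["multi_modal"])     # True
```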
.eslintrc.cjs ADDED
@@ -0,0 +1,84 @@
+ /**
+  * This is intended to be a basic starting point for linting in your app.
+  * It relies on recommended configs out of the box for simplicity, but you can
+  * and should modify this configuration to best suit your team's needs.
+  */
+
+ /** @type {import('eslint').Linter.Config} */
+ module.exports = {
+   root: true,
+   parserOptions: {
+     ecmaVersion: "latest",
+     sourceType: "module",
+     ecmaFeatures: {
+       jsx: true,
+     },
+   },
+   env: {
+     browser: true,
+     commonjs: true,
+     es6: true,
+   },
+   ignorePatterns: ["!**/.server", "!**/.client"],
+
+   // Base config
+   extends: ["eslint:recommended"],
+
+   overrides: [
+     // React
+     {
+       files: ["**/*.{js,jsx,ts,tsx}"],
+       plugins: ["react", "jsx-a11y"],
+       extends: [
+         "plugin:react/recommended",
+         "plugin:react/jsx-runtime",
+         "plugin:react-hooks/recommended",
+         "plugin:jsx-a11y/recommended",
+       ],
+       settings: {
+         react: {
+           version: "detect",
+         },
+         formComponents: ["Form"],
+         linkComponents: [
+           { name: "Link", linkAttribute: "to" },
+           { name: "NavLink", linkAttribute: "to" },
+         ],
+         "import/resolver": {
+           typescript: {},
+         },
+       },
+     },
+
+     // Typescript
+     {
+       files: ["**/*.{ts,tsx}"],
+       plugins: ["@typescript-eslint", "import"],
+       parser: "@typescript-eslint/parser",
+       settings: {
+         "import/internal-regex": "^~/",
+         "import/resolver": {
+           node: {
+             extensions: [".ts", ".tsx"],
+           },
+           typescript: {
+             alwaysTryTypes: true,
+           },
+         },
+       },
+       extends: [
+         "plugin:@typescript-eslint/recommended",
+         "plugin:import/recommended",
+         "plugin:import/typescript",
+       ],
+     },
+
+     // Node
+     {
+       files: [".eslintrc.cjs"],
+       env: {
+         node: true,
+       },
+     },
+   ],
+ };
.gitignore ADDED
@@ -0,0 +1,7 @@
+ node_modules
+
+ /__pycache__
+ /.cache
+ /build
+ .venv
+ .env
.python-version ADDED
@@ -0,0 +1 @@
+ 3.12
Dockerfile CHANGED
@@ -1,16 +1,31 @@
- # Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
- # you will also find guides on how best to write your Dockerfile
- FROM python:3.9
+ # Get a distribution that has uv already installed
+ FROM ghcr.io/astral-sh/uv:python3.13-bookworm-slim
+
+ # Add user - this is the user that will run the app
+ # If you do not set user, the app will run as root (undesirable)
  RUN useradd -m -u 1000 user
  USER user
- ENV PATH="/home/user/.local/bin:$PATH"
- WORKDIR /app
- COPY --chown=user ./requirements.txt requirements.txt
- RUN pip install --no-cache-dir --upgrade -r requirements.txt
- COPY --chown=user . /app
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+
+ # Set the home directory and path
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ ENV UVICORN_WS_PROTOCOL=websockets
+
+ # Set the working directory
+ WORKDIR $HOME/app
+
+ # Copy the app to the container
+ COPY --chown=user . $HOME/app
+
+ # Install the dependencies
+ # RUN uv sync --frozen
+ RUN uv sync
+
+ # Expose the port
+ EXPOSE 7860
+
+ # Run the app
+ # CMD yarn build && uv run uvicorn app:app --host 0.0.0.0 --port 7860
+ CMD uv run chainlit run chainlit.py --host 0.0.0.0 --port 7860
README.md CHANGED
@@ -9,3 +9,44 @@ license: mit
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ # Welcome to Remix!
+
+ - 📖 [Remix docs](https://remix.run/docs)
+
+ ## Development
+
+ Run the dev server:
+
+ ```shellscript
+ npm run dev
+ ```
+
+ ## Deployment
+
+ First, build your app for production:
+
+ ```sh
+ npm run build
+ ```
+
+ Then run the app in production mode:
+
+ ```sh
+ npm start
+ ```
+
+ Now you'll need to pick a host to deploy it to.
+
+ ### DIY
+
+ If you're familiar with deploying Node applications, the built-in Remix app server is production-ready.
+
+ Make sure to deploy the output of `npm run build`:
+
+ - `build/server`
+ - `build/client`
+
+ ## Styling
+
+ This template comes with [Tailwind CSS](https://tailwindcss.com/) already configured for a simple default starting experience. You can use whatever CSS framework you prefer. See the [Vite docs on CSS](https://vitejs.dev/guide/features.html#css) for more information.
app.py CHANGED
@@ -1,7 +1,17 @@
- from fastapi import FastAPI
+ import os
+ import uvicorn
+ from fastapi import FastAPI, Body
+ from fastapi.staticfiles import StaticFiles

  app = FastAPI()

- @app.get("/")
- def greet_json():
+ @app.get("/api/chat")
+ def chat():
      return {"Hello": "World!"}
+
+ if os.getenv("ENV") == "production":
+     app.mount("/", StaticFiles(directory="dist", html=True), name="static")
+     app.get("/")(StaticFiles(directory="dist", html=True))
+
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=7860)
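The new `/api/chat` route is easy to smoke-test in-process. A minimal sketch, assuming `ENV` is unset so the production-only `StaticFiles` mount is skipped, and `httpx` is installed (FastAPI's `TestClient` requires it):

```python
# Sketch: exercise the /api/chat route from app.py without starting a server.
from fastapi.testclient import TestClient

from app import app

client = TestClient(app)
resp = client.get("/api/chat")
assert resp.status_code == 200
assert resp.json() == {"Hello": "World!"}
```

Note that in production mode the `app.mount("/", ...)` call already serves `dist/index.html` at `/`, so the extra `app.get("/")(StaticFiles(...))` registration appears redundant.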
chainlit.md ADDED
@@ -0,0 +1,14 @@
+ # Welcome to Chainlit! 🚀🤖
+
+ Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
+
+ ## Useful Links 🔗
+
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
+
+ We can't wait to see what you create with Chainlit! Happy coding! 💻😊
+
+ ## Welcome screen
+
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
chainlit.py ADDED
@@ -0,0 +1,126 @@
+ from typing import Literal
+ from langchain_openai import ChatOpenAI
+ from langgraph.prebuilt import ToolNode
+ from langchain.schema import StrOutputParser
+ from langchain.prompts import ChatPromptTemplate
+ from langchain.schema.runnable.config import RunnableConfig
+ from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage
+ from langgraph.graph import END, StateGraph, START
+ from langgraph.graph.message import MessagesState
+
+ # tools
+ from langchain_core.tools import tool
+ from langchain_community.tools.arxiv.tool import ArxivQueryRun
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool
+
+ import chainlit as cl
+
+ @tool
+ def multiply(first_int: int, second_int: int) -> int:
+     """Multiply two integers together."""
+     return first_int * second_int
+
+ yahoo_finance_news = YahooFinanceNewsTool()
+ tavily_search = TavilySearchResults(max_results=5)
+ arxiv_query = ArxivQueryRun()
+
+ tools = [
+     yahoo_finance_news,
+     tavily_search,
+     arxiv_query,
+     multiply,
+ ]
+
+ model = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)
+ final_model = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)
+
+ model = model.bind_tools(tools)
+
+ # NOTE: this is where we're adding a tag that we can use later to filter the
+ # model stream events to only the model called in the final node. This is not
+ # necessary if you call a single LLM, but it matters if you call multiple
+ # models within the node and want to filter events from only one of them.
+ final_model = final_model.with_config(tags=["final_node"])
+ tool_node = ToolNode(tools=tools)
+
+ def should_continue(state: MessagesState) -> Literal["tools", "final"]:
+     messages = state["messages"]
+     last_message = messages[-1]
+     # If the LLM makes a tool call, then we route to the "tools" node
+     if last_message.tool_calls:
+         return "tools"
+     # Otherwise, we stop (reply to the user)
+     return "final"
+
+
+ def call_model(state: MessagesState):
+     messages = state["messages"]
+     response = model.invoke(messages)
+     # We return a list, because this will get merged with the existing list
+     return {"messages": [response]}
+
+
+ def call_final_model(state: MessagesState):
+     messages = state["messages"]
+     last_ai_message = messages[-1]
+     response = final_model.invoke(
+         [
+             SystemMessage("Provide a summary in point form notes of the following:"),
+             HumanMessage(last_ai_message.content),
+         ]
+     )
+     # Overwrite the last AI message from the agent
+     response.id = last_ai_message.id
+     return {"messages": [response]}
+
+
+ builder = StateGraph(MessagesState)
+
+ builder.add_node("agent", call_model)
+ builder.add_node("tools", tool_node)
+ # Add a separate final node
+ builder.add_node("final", call_final_model)
+
+ builder.add_edge(START, "agent")
+ builder.add_conditional_edges(
+     "agent",
+     should_continue,
+ )
+
+ builder.add_edge("tools", "agent")
+ builder.add_edge("final", END)
+
+ graph = builder.compile()
+
+ @cl.on_chat_start
+ async def on_chat_start():
+     model = ChatOpenAI(streaming=True)
+     prompt = ChatPromptTemplate.from_messages(
+         [
+             (
+                 "system",
+                 "You're a very knowledgeable agent with access to several tools to get recent data and multiply numbers.",
+             ),
+             ("human", "{question}"),
+         ]
+     )
+     runnable = prompt | model | StrOutputParser()
+     cl.user_session.set("runnable", runnable)
+
+ @cl.on_message
+ async def on_message(msg: cl.Message):
+     config = {}
+     cb = cl.LangchainCallbackHandler()
+     final_answer = cl.Message(content="")
+
+     # Use a distinct loop variable so the incoming cl.Message isn't shadowed
+     for stream_msg, metadata in graph.stream(
+         {"messages": [HumanMessage(content=msg.content)]},
+         stream_mode="messages",
+         config=RunnableConfig(callbacks=[cb], **config),
+     ):
+         if (
+             stream_msg.content
+             and not isinstance(stream_msg, HumanMessage)
+             and metadata["langgraph_node"] == "final"
+         ):
+             await final_answer.stream_token(stream_msg.content)
+
+     await final_answer.send()
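The `final_node` tag attached with `with_config(tags=["final_node"])` offers an alternative to filtering on `metadata["langgraph_node"] == "final"`. A hedged sketch, assuming LangGraph propagates config tags into the streamed metadata (recent versions do); the question string is illustrative:

```python
# Sketch: filter streamed tokens by the "final_node" tag instead of node name.
from langchain_core.messages import HumanMessage

for stream_msg, metadata in graph.stream(
    {"messages": [HumanMessage(content="Summarize recent QLoRA news")]},
    stream_mode="messages",
):
    # Only print tokens produced by the model tagged "final_node" above.
    if stream_msg.content and "final_node" in metadata.get("tags", []):
        print(stream_msg.content, end="", flush=True)
```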
graph.py ADDED
@@ -0,0 +1,77 @@
+ import asyncio
+ from typing import TypedDict, Annotated
+ from langchain_openai import ChatOpenAI
+ from langgraph.prebuilt import ToolNode
+ from langgraph.graph import StateGraph, END
+ from langgraph.graph.message import add_messages
+ from langchain_core.messages import HumanMessage
+ from langchain.schema.runnable import RunnableLambda
+
+ # tools
+ from langchain_core.tools import tool
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain_community.tools.arxiv.tool import ArxivQueryRun
+ from langchain_community.tools.openweathermap.tool import OpenWeatherMapQueryRun
+
+ @tool
+ def multiply(first_int: int, second_int: int) -> int:
+     """Multiply two integers together."""
+     return first_int * second_int
+
+ tavily_search = TavilySearchResults(max_results=5)
+ weather_query = OpenWeatherMapQueryRun()
+ arxiv_query = ArxivQueryRun()
+
+ tool_belt = [
+     tavily_search,
+     weather_query,
+     arxiv_query,
+     multiply,
+ ]
+
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ llm = llm.bind_tools(tool_belt)
+
+ class AgentState(TypedDict):
+     messages: Annotated[list, add_messages]
+     loop_count: int
+
+ async def call_model(state):
+     messages = state["messages"]
+     response = llm.invoke(messages)
+     return {"messages": [response]}
+
+ def should_continue(state):
+     last_message = state["messages"][-1]
+     if last_message.tool_calls:
+         return "action"
+     return END
+
+ tool_node = ToolNode(tool_belt)
+
+ graph = StateGraph(AgentState)
+ graph.add_node("agent", call_model)
+ graph.add_node("action", tool_node)
+
+ graph.set_entry_point("agent")
+ graph.add_conditional_edges(
+     "agent",
+     should_continue,
+ )
+ graph.add_edge("action", "agent")
+
+ tool_call_graph = graph.compile()
+
+ async def main():
+     inputs = {"messages": [HumanMessage(content="Search Arxiv for the QLoRA paper, then search each of the authors to find out their latest Tweet using Tavily! and solve 5 x 5 please.")]}
+
+     async for chunk in tool_call_graph.astream(inputs, stream_mode="updates"):
+         for node, values in chunk.items():
+             # print(f"Receiving update from node: '{node}'")
+             # if node == "action":
+             #     print(f"Tool Used: {values['messages'][0].name}")
+             print(values["messages"])
+             print("\n")
+
+
+ # Guard the entry point so importing this module doesn't run the demo loop
+ if __name__ == "__main__":
+     asyncio.run(main())
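With the `__main__` guard in place, `graph.py` can also be imported and the compiled graph invoked synchronously. A minimal sketch, assuming `OPENAI_API_KEY`, `TAVILY_API_KEY`, and `OPENWEATHERMAP_API_KEY` are set (and `pyowm` is installed), since the tools are constructed at import time; the question is illustrative:

```python
# Sketch: one synchronous call through the agent -> action -> agent loop.
from langchain_core.messages import HumanMessage

from graph import tool_call_graph

result = tool_call_graph.invoke(
    {"messages": [HumanMessage(content="What is 6 x 7? Use the multiply tool.")]}
)
# The last message in state is the model's final answer after any tool calls.
print(result["messages"][-1].content)
```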
pyproject.toml ADDED
@@ -0,0 +1,17 @@
+ [project]
+ name = "langgraphsandbox"
+ version = "0.1.0"
+ description = "Add your description here"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ dependencies = [
+     "arxiv>=2.1.3",
+     "chainlit>=2.1.0",
+     "fastapi>=0.115.8",
+     "langchain>=0.3.17",
+     "langchain-community>=0.3.16",
+     "langchain-openai>=0.3.3",
+     "langgraph>=0.2.69",
+     "uvicorn[standard]>=0.34.0",
+     "yfinance>=0.2.52",
+ ]
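After `uv sync`, a quick stdlib check confirms the resolved environment satisfies these lower bounds. A sketch; nothing here is project-specific beyond the package names above:

```python
# Sketch: print the versions uv resolved for the key dependencies.
from importlib.metadata import version

for pkg in ("chainlit", "fastapi", "langchain", "langgraph", "yfinance"):
    print(f"{pkg}=={version(pkg)}")
```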
requirements.txt DELETED
@@ -1,2 +0,0 @@
- fastapi
- uvicorn[standard]
uv.lock ADDED
The diff for this file is too large to render. See raw diff