import math
import os
import types
import uuid

import gradio as gr
from langchain.chat_models import init_chat_model
from langchain.embeddings import init_embeddings
from langgraph.store.memory import InMemoryStore
from langgraph_bigtool import create_agent
from langgraph_bigtool.utils import (
    convert_positional_only_function_to_tool,
)

# The OpenAI key is expected as a Space secret named `api_key`
os.environ["OPENAI_API_KEY"] = os.getenv("api_key")
# Collect functions from the built-in `math` module
all_tools = []
for function_name in dir(math):
    function = getattr(math, function_name)
    if not isinstance(function, types.BuiltinFunctionType):
        continue
    # `math` functions are positional-only; convert them so they can be
    # wrapped as tools (the helper returns None when conversion fails)
    if tool := convert_positional_only_function_to_tool(function):
        all_tools.append(tool)
# Create registry of tools. This is a dict mapping
# identifiers to tool instances.
tool_registry = {
    str(uuid.uuid4()): tool
    for tool in all_tools
}
# Index tool names and descriptions in the LangGraph
# Store. Here we use a simple in-memory store.
embeddings = init_embeddings("openai:text-embedding-3-small")
store = InMemoryStore(
    index={
        "embed": embeddings,
        "dims": 1536,
        "fields": ["description"],
    }
)
for tool_id, tool in tool_registry.items():
    store.put(
        ("tools",),
        tool_id,
        {
            "description": f"{tool.name}: {tool.description}",
        },
    )
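# At run time, the agent searches these indexed descriptions to decide
# which tools to surface to the model for a given query.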
# Initialize agent
llm = init_chat_model("openai:gpt-4o-mini")
builder = create_agent(llm, tool_registry)
agent = builder.compile(store=store)
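# Extend the registry with SymPy-based symbolic algebra tools, index them
# in the same store, and rebuild the agent further below.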
from langchain_core.tools import Tool
import sympy
from sympy import symbols
def make_sympy_tool(func, name, description):
    """Wrap a SymPy function as a string-in/string-out LangChain tool."""
    def _tool(expr: str) -> str:
        local_symbols = symbols("x y z a b c n")
        parsed_expr = sympy.sympify(expr, locals={s.name: s for s in local_symbols})
        result = func(parsed_expr)
        return str(result)

    return Tool.from_function(
        name=name,
        description=description,
        func=_tool,
    )
from sympy import simplify, expand, factor

sympy_tools = [
    make_sympy_tool(simplify, "simplify", "Simplifies a symbolic expression"),
    make_sympy_tool(expand, "expand", "Expands a symbolic expression"),
    make_sympy_tool(factor, "factor", "Factors a symbolic expression"),
]
for tool in sympy_tools:
    tool_id = str(uuid.uuid4())
    tool_registry[tool_id] = tool
    store.put(
        ("tools",),
        tool_id,
        {"description": f"{tool.name}: {tool.description}"},
    )
# Rebuild and recompile the agent so it can also retrieve the SymPy tools
builder = create_agent(llm, tool_registry)
agent = builder.compile(store=store)
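# For reference, the compiled agent can also be called outside Gradio
# (illustrative only, not executed here):
#   result = agent.invoke({"messages": "Use available tools to compute cos(pi)."})
#   print(result["messages"][-1].content)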
def pvsnp(problem):
    """Stream the agent's run for `problem` and collect the messages for display."""
    output = []
    for step in agent.stream(
        {"messages": "Use tools to answer: " + problem},
        stream_mode="updates",
    ):
        for _, update in step.items():
            for message in update.get("messages", []):
                message.pretty_print()  # log to the server console
                # pretty_print() returns None, so collect the message text itself
                output.append(str(message.content))
    return "\n\n".join(output)
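# Wire the agent into a simple Gradio interface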
iface = gr.Interface(
    fn=pvsnp,
    inputs=gr.Textbox(label="What problem would you like to classify as P or NP?"),
    outputs=gr.Markdown(label="Agent's response"),  # rendered as Markdown
    title="PolyMath",
    description=(
        "PolyMath is an advanced AI agent that guides users through the intricate maze of "
        "computational complexity. This agent scrutinizes problem descriptions with sophisticated "
        "LLM prompts and symbolic reasoning. It classifies problems into categories such as P, NP, "
        "NP-complete, NP-hard, or beyond (e.g., PSPACE, EXPTIME), while providing clear, concise "
        "explanations of its reasoning. As part of AI Quotient’s Millennium Math Challenge, it is "
        "the first step towards solving the P vs NP problem."
    ),
    theme=gr.themes.Ocean(),
    examples=["Simplify x**2 + 2*x + 1"],
)
# Launch the app
iface.launch()