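"""Agentville: a multi-agent Gradio demo in which Gemini-powered agents
(a planner, a Researcher with Google Search, and a Coder with code execution)
collaborate to tackle a user-supplied problem."""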
import os

# LangSmith tracing configuration; supply your own API key via the environment.
os.environ["LANGSMITH_TRACING"] = "true"
os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ.setdefault("LANGSMITH_API_KEY", "<your-langsmith-api-key>")
os.environ["LANGSMITH_PROJECT"] = "pr-complicated-disagreement-8"
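# The @traceable decorator used below reads these LANGSMITH_* variables from the
# environment, so no explicit LangSmith client setup is needed here.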
import gradio as gr
import json
from google import genai
from langsmith import traceable
from IPython.display import display, HTML, Markdown
def show_json(obj):
    """Pretty-print a pydantic object as JSON and return the dumped string."""
    dumped = json.dumps(obj.model_dump(exclude_none=True), indent=2)
    print(dumped)
    return dumped
def show_parts(r):
    """Render the parts of a Gemini response and return the concatenated text."""
    output = ""
    parts = r.candidates[0].content.parts
    if parts is None:
        finish_reason = r.candidates[0].finish_reason
        print(f'{finish_reason=}')
        return output
    for part in parts:
        if part.text:
            display(Markdown(part.text))
            output += part.text
        elif part.executable_code:
            display(Markdown(f'```python\n{part.executable_code.code}\n```'))
            output += f'```python\n{part.executable_code.code}\n```'
        else:
            show_json(part)
    grounding_metadata = r.candidates[0].grounding_metadata
    if grounding_metadata and grounding_metadata.search_entry_point:
        display(HTML(grounding_metadata.search_entry_point.rendered_content))
    return output
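# show_parts() returns everything it rendered so callers can append it to a
# running transcript; an empty string signals a blocked or empty response
# (see the finish_reason branch above).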
# Read the Gemini API key from the environment rather than hardcoding it.
client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"])
MODEL_ID = "gemini-2.0-flash-thinking-exp"
""" | |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference | |
""" | |
#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta") | |
# Legacy google.generativeai configuration, kept for reference only
# (generation_config, system_instruction, and safety_settings are not defined here):
'''
import google.generativeai as genai2
genai2.configure(api_key=os.environ["GOOGLE_API_KEY"])
model_gen = genai2.GenerativeModel(model_name="gemini-2.0-flash-thinking-exp",
                                   generation_config=generation_config,
                                   system_instruction=system_instruction,
                                   safety_settings=safety_settings)
'''
@traceable  # log calls to LangSmith using the environment configuration above
def model_response(text):
    response = client.models.generate_content(
        model=MODEL_ID,
        contents=text,
    )
    return response.text
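# A minimal sanity check for model_response (a sketch; uncomment to run, which
# requires a valid GOOGLE_API_KEY, network access, and that the experimental
# model id above is still being served):
# print(model_response("Say hello in one short sentence."))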
# Tool configurations for the google-genai SDK: grounded search and code execution.
search_tool = {'google_search': {}}
code_tool = {'code_execution': {}}

# One chat session per agent, each with its own tool.
researcher_chat = client.chats.create(model="gemini-2.0-flash-exp", config={'tools': [search_tool]})
coder_chat = client.chats.create(model="gemini-2.0-flash-exp", config={'tools': [code_tool]})
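# Quick smoke tests for the tool-enabled sessions (sketches; uncomment to run,
# assuming a valid API key and that gemini-2.0-flash-exp is still served):
# show_parts(researcher_chat.send_message("What is the latest stable Python release?"))
# show_parts(coder_chat.send_message("Compute the 20th Fibonacci number with code."))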
def agentville(problem):
    """Plan a solution, route each step to an agent, and synthesize a final answer."""
    NUM_STEPS = 3  # the planner is asked for exactly this many steps
    memory = {}
    output = ""
    final_response = ""
    plan = model_response(f'''You are a thinker. Think long and hard about the problem: {problem} and come up with the steps required to solve it. You are supposed to come up with {NUM_STEPS} steps to build a solution.''')
    print("Plan", plan)
    output += plan
    yield output, final_response
    for i in range(1, NUM_STEPS + 1):
        print("Step:", i)
        output += f"\n\nStep {i}: "
        yield output, final_response
        step = model_response(f'''Extract step {i} from the given plan: {plan}. Figure out which of the agents below is required to solve it.
        Once you figure out the agent, output only the name of the agent.
        Do not output anything else. The agents at your disposal are:
        Researcher: Has access to the Google Search tool; can search for resources and answer questions.
        Coder: Expert programmer; can solve any problem at hand.
        Your response should be in the following format:
        "Step": <the complete description of the step to be executed>
        "agent_name": "Researcher/Coder"
        ''')
        print("Current step", step)
        output += step
        yield output, final_response
        if 'Coder' in step:
            print("Agent is Coder")
            r = coder_chat.send_message(f'''Complete the step: {step}''')
        else:
            print("Agent is Researcher")
            r = researcher_chat.send_message(f'''Complete the step: {step}''')
        execution_step = show_parts(r)
        output += execution_step
        yield output, final_response
        memory[i] = execution_step
    final_response = model_response(f'''Given the problem statement: {problem} and the progress made by the agents: {memory}, come up with the final answer. Do not explain what the agents have done. Focus on getting the final answer.''')
    print("Final response", final_response)
    output += "\n\n" + final_response
    yield output, final_response
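# agentville is a generator, so it can also be driven outside Gradio -- a
# minimal sketch (assumes the API calls above succeed):
# for progress, answer in agentville("A white paper on the evolution of Gen AI"):
#     pass  # Gradio streams each yield; here we only keep the last one
# print(answer)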
iface = gr.Interface(
    fn=agentville,
    inputs=gr.Textbox(label="Problem"),
    outputs=[gr.Textbox(label="Agents processing"),
             gr.Markdown(label="Final response")],
    title="Agentville: Power Up Your World ⚡️ with Autonomous Agents 🤖💡 (Research Preview)",
    description="Watch multiple agents collaborate seamlessly to tackle complex challenges",
    theme=gr.themes.Ocean(),
    article="All conversations will be logged for quality improvement.",
    examples=["A white paper on the evolution of Gen AI",
              "Explain how Quantum computing can disrupt our life",
              "Is SaaS really dead in the wake of recent Gen AI developments?"],
)
# Launch the Gradio app
if __name__ == "__main__":
    iface.queue(max_size=20).launch(share=True, debug=True)