# LangSmith tracing configuration (set these as environment variables or Space
# secrets rather than in code; the API key is redacted deliberately):
# LANGSMITH_TRACING=True
# LANGSMITH_ENDPOINT="https://api.smith.langchain.com"
# LANGSMITH_API_KEY="<your-langsmith-api-key>"
# LANGSMITH_PROJECT="pr-complicated-disagreement-8"
import json
import os

import gradio as gr
from google import genai
from IPython.display import display, HTML, Markdown
from langsmith import traceable  # imported for optional LangSmith tracing
def show_json(obj):
    """Pretty-print a pydantic-backed SDK object as JSON and return the string."""
    serialized = json.dumps(obj.model_dump(exclude_none=True), indent=2)
    print(serialized)
    return serialized
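# Usage sketch (assumption: any SDK object exposing pydantic's model_dump()
# works here, e.g. a single response candidate):
#   show_json(response.candidates[0])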
def show_parts(r):
    """Render each part of a Gemini response and return the last text/code output."""
    output = None  # initialized up front so we never return an unbound name
    parts = r.candidates[0].content.parts
    if parts is None:
        finish_reason = r.candidates[0].finish_reason
        print(f'{finish_reason=}')
        return output
    for part in parts:
        if part.text:
            display(Markdown(part.text))
            output = part.text
        elif part.executable_code:
            display(Markdown(f'```python\n{part.executable_code.code}\n```'))
            output = part.executable_code.code  # keep the code string, not the object
        else:
            show_json(part)
    grounding_metadata = r.candidates[0].grounding_metadata
    if grounding_metadata and grounding_metadata.search_entry_point:
        display(HTML(grounding_metadata.search_entry_point.rendered_content))
    return output
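# How show_parts is typically driven, as a hedged sketch (assumes the `client`
# and MODEL_ID defined below; the prompt is illustrative only):
#   r = client.models.generate_content(model=MODEL_ID, contents="Plot y = x^2")
#   last_part = show_parts(r)  # renders text/code parts, returns the last one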
# Read the Gemini API key from the environment instead of hard-coding it.
client = genai.Client(api_key=os.environ["GOOGLE_API_KEY"])
MODEL_ID = "gemini-2.0-flash-thinking-exp"
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
#client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
'''
Legacy configuration using the older google.generativeai package (kept for
reference; generation_config, system_instruction, and safety_settings would
need to be defined before use):

import google.generativeai as genai2
genai2.configure(api_key=os.environ["GOOGLE_API_KEY"])
model_gen = genai2.GenerativeModel(
    model_name="gemini-2.0-flash-thinking-exp",
    generation_config=generation_config,
    system_instruction=system_instruction,
    safety_settings=safety_settings,
)
'''
def model_response(text):
    """Send a single-turn prompt to the model and return the text of the reply."""
    response = client.models.generate_content(
        model=MODEL_ID,
        contents=text,
    )
    return response.text
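# Quick smoke test for model_response (hedged; the exact reply varies by model):
#   print(model_response("Summarize the rules of chess in one sentence."))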
search_tool = {'google_search': {}}
code_tool = {'code_execution': {}}
tools = [search_tool, code_tool]  # combined list (currently unused; each chat below gets one tool)
researcher_chat = client.chats.create(model="gemini-2.0-flash-exp", config={'tools': [search_tool]})
coder_chat = client.chats.create(model="gemini-2.0-flash-exp", config={'tools': [code_tool]})
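# The chat objects are stateful: send_message() appends to the conversation
# history, so later steps see earlier turns. Hedged sketch (illustrative
# prompt; the rendered search entry point only appears for grounded answers):
#   r = researcher_chat.send_message("Who won the most recent FIFA World Cup?")
#   show_parts(r)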
def agentville(problem):
    """Plan the problem, route each step to an agent, then synthesize an answer."""
    memory = {}
    output = ""
    final_response = ""
    plan = model_response(f'''You are a thinker. Think long and hard about the problem: {problem} and come up with the steps required to solve it. You are supposed to come up with 3 steps to build a solution.''')
    print("Plan:", plan)
    output += plan + "\n\n"
    yield output, final_response
    for i in range(1, 4):  # the planner is asked for exactly 3 steps
        print("Step:", i)
        output += f"Step {i} "
        yield output, final_response
        step = model_response(f'''Extract the {i}th step from the given plan: {plan}. Figure out which of the agents below is required to solve it.
Once you figure out the agent, just output the name of the agent. Do not output anything else. The agents at your disposal are:
Researcher: has access to the Google Search tool; can search for resources and answer questions.
Coder: expert programmer; can solve any problem at its disposal.
Your response should be in the following format:
"Step": <the complete description of the step to be executed>
"agent_name": "Researcher/Coder"
''')
        print("Current step:", step)
        output += step + "\n"
        yield output, final_response
        if 'Coder' in step:
            print("Agent is Coder")
            r = coder_chat.send_message(f'Complete the step: {step}')
        else:
            print("Agent is Researcher")
            r = researcher_chat.send_message(f'Complete the step: {step}')
        execution_step = show_parts(r) or ""  # guard against a None return
        output += execution_step + "\n"
        yield output, final_response
        memory[i] = execution_step
    final_response = model_response(f'''Given the problem statement: {problem} and the progress made by agents: {memory}, come up with the final answer. Do not explain what the agents have done. Focus on getting the final answer.''')
    print("Final response:", final_response)
    output += final_response
    yield output, final_response
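# agentville is a generator: Gradio streams each yield to the UI. Outside
# Gradio it can be drained manually (hedged sketch with an illustrative task):
#   for progress, answer in agentville("Compare bubble sort and merge sort"):
#       pass
#   print(answer)  # the final synthesized response from the last yield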
iface = gr.Interface(
    fn=agentville,
    inputs=gr.Textbox(label="Problem"),
    outputs=[gr.Textbox(label="Agents processing"),
             gr.Markdown(label="Final response")],
    title="Agentville: Power Up Your World ⚡️🌍 with Autonomous Agents 🤖💡 (Research Preview)",
    description="Watch multiple agents collaborate seamlessly to tackle complex challenges.",
    theme=gr.themes.Ocean(),
    article="All conversations are logged for quality improvement.",
    examples=["A white paper on the evolution of Gen AI",
              "Explain how quantum computing can disrupt our life",
              "Is SaaS really dead in the wake of recent Gen AI developments?"],
)
# Launch the Gradio app
if __name__ == "__main__":
    iface.queue(max_size=20).launch(share=True, debug=True)