import os
import gradio as gr
import requests
import pandas as pd
from transformers import AutoModelForCausalLM, AutoTokenizer
# ---------- Imports for Advanced Agent ----------
import re
from langgraph.graph import StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.tools import tool
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_community.tools.tavily_search import TavilySearchResults
from groq import Groq
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
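# NOTE (assumed environment): GROQ_API_KEY must be set for the Groq client,
# TAVILY_API_KEY for the Tavily-backed web_search tool, and SPACE_ID is
# provided automatically when running inside a Hugging Face Space.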
# ---------- Tools ----------
@tool
def wiki_search(query: str) -> str:
"""Search Wikipedia for a given query and return content from up to 2 relevant pages."""
docs = WikipediaLoader(query=query, load_max_docs=2).load()
return "\n\n".join([doc.page_content for doc in docs])
@tool
def web_search(query: str) -> str:
    """Search the web using the Tavily API and return content from up to 3 search results."""
    # TavilySearchResults returns a list of dicts with "url" and "content" keys,
    # not document objects, so index into each result dict.
    results = TavilySearchResults(max_results=3).invoke(query)
    return "\n\n".join([res["content"] for res in results])
@tool
def arxiv_search(query: str) -> str:
    """Search academic papers on arXiv for a given query and return up to 3 result summaries."""
    docs = ArxivLoader(query=query, load_max_docs=3).load()
    return "\n\n".join([doc.page_content[:1000] for doc in docs])
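# Each @tool exposes .invoke(); a quick sanity check (requires network access
# and, for web_search, a TAVILY_API_KEY):
#   print(wiki_search.invoke("Alan Turing")[:500])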
# Tool-based LangGraph builder
def build_tool_graph(system_prompt):
    # Local GPT-2 model serves as the assistant node. Note that GPT-2 cannot
    # emit structured tool calls, so tools_condition will normally route to END.
    llm = AutoModelForCausalLM.from_pretrained("gpt2")
    tokenizer = AutoTokenizer.from_pretrained("gpt2")

    def assistant(state: MessagesState):
        # MessagesState holds BaseMessage objects, so use attribute access,
        # and prepend the system prompt to the latest message.
        input_text = f"{system_prompt}\n\n{state['messages'][-1].content}"
        inputs = tokenizer(input_text, return_tensors="pt", truncation=True)
        outputs = llm.generate(**inputs, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)
        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"messages": [AIMessage(content=result)]}

    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode([wiki_search, web_search, arxiv_search]))
    builder.set_entry_point("assistant")
    # tools_condition routes to "tools" when the last message contains tool
    # calls and to END otherwise, so no explicit finish point is needed.
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")
    return builder.compile()
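# The compiled graph takes a MessagesState-style dict, as query_tools does below:
#   graph = build_tool_graph("You are a helpful assistant.")
#   out = graph.invoke({"messages": [HumanMessage(content="What is 2 + 2?")]})
#   print(out["messages"][-1].content)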
# --- Advanced BasicAgent Class ---
class BasicAgent:
def __init__(self):
print("BasicAgent initialized.")
self.client = Groq(api_key=os.environ.get("GROQ_API_KEY", ""))
        self.agent_prompt = (
            """You are a general AI assistant. I will ask you a question. Report your thoughts, and
            finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
            YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated
            list of numbers and/or strings.
            If you are asked for a number, don't use commas to write your number, and don't use units
            such as $ or percent signs unless specified otherwise.
            If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and
            write digits in plain text unless specified otherwise.
            If you are asked for a comma-separated list, apply the above rules depending on whether
            each element of the list is a number or a string."""
        )
self.tool_chain = build_tool_graph(self.agent_prompt)
def format_final_answer(self, answer: str) -> str:
cleaned = " ".join(answer.split())
match = re.search(r"FINAL ANSWER:\s*(.*)", cleaned, re.IGNORECASE)
return f"FINAL ANSWER: {match.group(1).strip()}" if match else f"FINAL ANSWER: {cleaned}"
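    # Examples of the normalisation above:
    #   format_final_answer("Reasoning... FINAL ANSWER: 42") -> "FINAL ANSWER: 42"
    #   format_final_answer("42")                            -> "FINAL ANSWER: 42"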
def query_groq(self, question: str) -> str:
full_prompt = f"{self.agent_prompt}\n\nQuestion: {question}"
try:
response = self.client.chat.completions.create(
model="llama3-8b-8192",
messages=[{"role": "user", "content": full_prompt}]
)
answer = response.choices[0].message.content
print(f"[Groq Raw Response]: {answer}")
return self.format_final_answer(answer).upper()
except Exception as e:
print(f"[Groq ERROR]: {e}")
return self.format_final_answer("GROQ_ERROR")
def query_tools(self, question: str) -> str:
try:
input_state = {
"messages": [
SystemMessage(content=self.agent_prompt),
HumanMessage(content=question)
]
}
result = self.tool_chain.invoke(input_state)
final_msg = result["messages"][-1].content
print(f"[LangGraph Final Response]: {final_msg}")
return self.format_final_answer(final_msg)
except Exception as e:
print(f"[LangGraph ERROR]: {e}")
return self.format_final_answer("TOOL_ERROR")
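    # Routing: hard-coded commutativity answer first, then the reversed-text
    # riddle solver, then the LangGraph tool chain (when the question contains
    # "use tools"), falling back to a direct Groq call.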
def __call__(self, question: str) -> str:
print(f"Received question: {question[:50]}...")
if "commutative" in question.lower():
return self.check_commutativity()
if self.maybe_reversed(question):
print("Detected likely reversed riddle.")
return self.solve_riddle(question)
if "use tools" in question.lower():
return self.query_tools(question)
return self.query_groq(question)
def check_commutativity(self):
S = ['a', 'b', 'c', 'd', 'e']
counter_example_elements = set()
index = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4}
self.operation_table = [
['a', 'b', 'c', 'b', 'd'],
['b', 'c', 'a', 'e', 'c'],
['c', 'a', 'b', 'b', 'a'],
['b', 'e', 'b', 'e', 'd'],
['d', 'b', 'a', 'd', 'c']
]
for x in S:
for y in S:
x_idx = index[x]
y_idx = index[y]
if self.operation_table[x_idx][y_idx] != self.operation_table[y_idx][x_idx]:
counter_example_elements.add(x)
counter_example_elements.add(y)
return self.format_final_answer(", ".join(sorted(counter_example_elements)))
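    # For the hard-coded table above, only the (b, e) pair breaks commutativity
    # (b*e = 'c' but e*b = 'b'), so this returns "FINAL ANSWER: b, e".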
    def maybe_reversed(self, text: str) -> bool:
        words = text.split()
        if not words:
            # Guard against empty input before dividing by len(words).
            return False
        reversed_ratio = sum(
            1 for word in words if word[::-1].lower() in {
                "if", "you", "understand", "this", "sentence", "write",
                "opposite", "of", "the", "word", "left", "answer"
            }
        ) / len(words)
        return reversed_ratio > 0.3
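    # Heuristic example: in a fully reversed prompt such as
    # "ecnetnes siht dnatsrednu uoy fI", every token reversed ("sentence",
    # "this", "understand", "you", "if") lands in the keyword set, so the
    # ratio easily clears the 0.3 threshold.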
def solve_riddle(self, question: str) -> str:
question = question[::-1]
if "opposite of the word" in question:
match = re.search(r"opposite of the word ['\"](\w+)['\"]", question)
if match:
word = match.group(1).lower()
opposites = {
"left": "right", "up": "down", "hot": "cold",
"true": "false", "yes": "no", "black": "white"
}
opposite = opposites.get(word, f"UNKNOWN_OPPOSITE_OF_{word}")
return f"FINAL ANSWER: {opposite.upper()}"
return self.format_final_answer("COULD_NOT_SOLVE")
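    # Example: for a reversed question containing '"tfel" drow eht fo etisoppo
    # eht etirw', question[::-1] restores 'write the opposite of the word
    # "left"', the regex captures "left", and the method returns
    # "FINAL ANSWER: RIGHT".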
# --- Evaluation Logic ---
def run_and_submit_all(test_mode: bool, profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")
    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        return "Please log in to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
try:
agent = BasicAgent()
except Exception as e:
return f"Error initializing agent: {e}", None
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
if not questions_data:
return "Fetched questions list is empty or invalid format.", None
except Exception as e:
return f"Error fetching questions: {e}", None
results_log = []
answers_payload = []
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
continue
try:
submitted_answer = agent(question_text)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
except Exception as e:
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    if test_mode:
        # Test Mode: show the agent's answers without submitting them for scoring.
        return "Test Mode enabled: submission skipped.", pd.DataFrame(results_log)
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Overall Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
f"Message: {result_data.get('message', 'No message received.')}"
)
return final_status, pd.DataFrame(results_log)
except Exception as e:
return f"Submission Failed: {e}", pd.DataFrame(results_log)
# --- Gradio UI ---
with gr.Blocks() as demo:
gr.Markdown("# Basic Agent Evaluation Runner")
gr.Markdown(
"""
**Instructions:**
1. Clone and customize your agent logic.
2. Log in with Hugging Face.
3. Click the button to run evaluation and submit your answers.
"""
)
gr.LoginButton()
test_checkbox = gr.Checkbox(label="Enable Test Mode (Skip Submission)", value=False)
run_button = gr.Button("Run Evaluation")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    run_button.click(
        fn=run_and_submit_all,
        # The OAuth profile is injected automatically via the gr.OAuthProfile
        # type annotation, so only the checkbox is passed as an input.
        inputs=[test_checkbox],
        outputs=[status_output, results_table]
    )
if __name__ == "__main__":
print("Launching Gradio Interface...")
demo.launch(debug=True, share=False)