|
"""qResearch v3.0: Dual-Agent Research System""" |
|
|
|
import os |
|
import gradio as gr |
|
from dotenv import load_dotenv |
|
from smolagents import CodeAgent, HfApiModel, Tool |
|
|
|
|
|
load_dotenv(override=True) |
|
|
|
class DuckDuckGoSearchTool(Tool):
    """Web search tool for primary research.

    Wraps the ``duckduckgo_search`` package and returns results as a
    numbered plain-text list (title, URL, snippet) that an LLM agent can
    read directly.
    """

    name = "web_search"
    description = "Performs privacy-focused web searches using DuckDuckGo"
    inputs = {
        "query": {"type": "string", "description": "Research query"},
        "max_results": {"type": "integer", "description": "Number of results (3-10)", "default": 5},
    }
    output_type = "string"

    def forward(self, query: str, max_results: int = 5) -> str:
        """Run the search and return results as numbered lines.

        Args:
            query: Free-text search query.
            max_results: Upper bound on the number of results requested.

        Returns:
            One line per result, ``"N. title (url): snippet"``, or an
            explicit "No results found." message when the search comes
            back empty (an empty string would give the agent nothing to
            reason about).
        """
        # Imported lazily so the class can be defined (and the tool schema
        # inspected) without the optional dependency installed.
        from duckduckgo_search import DDGS

        with DDGS() as ddgs:
            results = list(ddgs.text(query, max_results=max_results))

        if not results:
            return "No results found."

        return "\n".join(
            f"{idx + 1}. {r['title']} ({r['href']}): {r['body']}"
            for idx, r in enumerate(results)
        )
|
|
|
class ResearchSystem:
    """Dual-agent research pipeline with a Gradio front end.

    Stage 1 ("researcher") performs web searches and drafts raw findings;
    stage 2 ("formatter") rewrites the draft in MLA style. Both agents
    share one LLM backend.
    """

    def __init__(self):
        # Single shared model for both agents. The HF chat API only accepts
        # user/assistant/system roles, so tool-call traffic is remapped.
        self.model = HfApiModel(
            model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            custom_role_conversions={"tool-call": "assistant", "tool-response": "user"},
            # NOTE(review): use_auth_token/trust_remote_code look like
            # transformers.from_pretrained kwargs — confirm HfApiModel
            # actually accepts and forwards them.
            use_auth_token=False,
            trust_remote_code=True,
        )

        # Stage 1: gathers raw findings with the search tool.
        self.researcher = CodeAgent(
            tools=[DuckDuckGoSearchTool()],
            model=self.model,
            system_prompt="""You are a Senior Research Analyst. Your tasks:
1. Conduct comprehensive web research using available tools
2. Synthesize findings into a detailed draft report
3. Include all relevant sources and data points
4. Maintain raw factual accuracy without formatting""",
        )

        # Stage 2: pure text transformation, so no tools are attached.
        self.formatter = CodeAgent(
            tools=[],
            model=self.model,
            system_prompt="""You are an MLA Formatting Specialist. Your tasks:
1. Receive raw research content
2. Apply proper MLA 9th edition formatting
3. Verify citation integrity
4. Structure content with:
   - Header block
   - Title capitalization
   - In-text citations
   - Works Cited section
5. Ensure academic tone and clarity""",
        )

    def _process_research(self, query: str) -> str:
        """Execute the two-stage research process.

        Args:
            query: The research topic supplied by the user.

        Returns:
            The MLA-formatted report produced by the formatter agent.
        """
        # BUGFIX: smolagents Agent.run() accepts no ``temperature`` kwarg;
        # passing one raises TypeError. Sampling parameters belong on the
        # model, not the run call.
        raw_response = self.researcher.run(
            task=f"Conduct research about: {query}"
        )

        formatted_response = self.formatter.run(
            task=f"Format this research into MLA:\n{raw_response}"
        )

        return formatted_response

    def create_interface(self):
        """Build and return the Gradio Blocks UI (not yet launched)."""
        with gr.Blocks(theme=gr.themes.Soft(), title="qResearch v3") as interface:
            gr.Markdown("# qResearch Dual-Agent System\n*Research → Analysis → MLA Formatting*")

            with gr.Row():
                with gr.Column(scale=2):
                    # BUGFIX: type="messages" is required for the
                    # gr.ChatMessage objects returned by _handle_query;
                    # the default "tuples" format rejects them. The old
                    # role->emoji avatar_images dict was also invalid —
                    # Gradio expects a (user, bot) pair of image paths.
                    chat_history = gr.Chatbot(
                        label="Agent Communication",
                        type="messages",
                        height=500,
                    )

                with gr.Column(scale=1):
                    research_steps = gr.JSON(
                        label="Processing Steps",
                        value={"Current Stage": "Awaiting Input"},
                    )

            with gr.Row():
                input_box = gr.Textbox(
                    placeholder="Enter research topic...",
                    lines=2,
                    label="Research Query",
                )
                submit_btn = gr.Button("Start Research", variant="primary")

            submit_btn.click(
                self._handle_query,
                inputs=[input_box],
                outputs=[chat_history, research_steps],
            )

        return interface

    def _handle_query(self, query: str):
        """Run the full pipeline for one query.

        Args:
            query: The research topic from the input box.

        Returns:
            A ``(messages, status)`` pair: chat messages for the Chatbot
            component and a status dict for the JSON panel. On failure,
            a single error message and an ``{"Error": ...}`` dict.
        """
        try:
            # BUGFIX: gr.ChatMessage.role must be "user" or "assistant";
            # custom roles ("Researcher", "Formatter") are rejected by the
            # Chatbot. Agent identity is conveyed via metadata titles.
            messages = [
                gr.ChatMessage(
                    role="assistant",
                    content=f"Beginning research: {query}",
                    metadata={"title": "🔍 Researcher"},
                )
            ]

            formatted_response = self._process_research(query)

            messages.append(
                gr.ChatMessage(
                    role="assistant",
                    content=formatted_response,
                    metadata={"title": "✒️ Formatter"},
                )
            )

            return messages, {
                "Completed Stages": ["Research Collection", "MLA Formatting"],
                "Current Status": "Research Complete",
            }

        except Exception as e:
            # Surface the failure in both the chat pane and the status panel
            # rather than letting Gradio swallow it.
            error_msg = gr.ChatMessage(
                role="assistant",
                content=f"Error: {str(e)}",
                metadata={"title": "System"},
            )
            return [error_msg], {"Error": str(e)}
|
|
|
def main() -> None:
    """Build the research system UI and serve it over HTTP."""
    research_system = ResearchSystem()
    interface = research_system.create_interface()
    # Bind to all interfaces so the app is reachable from outside a
    # container; 7860 is Gradio's conventional port.
    interface.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True,
    )


if __name__ == "__main__":
    main()