Update app.py
app.py
CHANGED
@@ -1,15 +1,10 @@
-"""qResearch: Dual-Agent Research System
-
-A research system that combines web search, analysis, and proper MLA formatting
-using a dual-agent approach with specialized roles.
-"""
+"""qResearch: Dual-Agent Research System"""
 
 import os
 import gradio as gr
 from smolagents import CodeAgent, HfApiModel, tool
 from typing import Dict, List, Optional, Tuple, Union
 
-# Advanced Research Tools
 @tool
 def web_search(query: str, max_results: int = 5) -> str:
     """Performs comprehensive web searches using DuckDuckGo
@@ -48,79 +43,61 @@ def analyze_content(text: str, analysis_type: str = "general") -> str:
     Returns:
         str: Structured analysis results including key points and findings
     """
-    points = []
-
     if "academic" in analysis_type.lower():
+        return (
+            "Academic Analysis:\n"
+            "1. Main Arguments:\n"
+            f"   - Key points from text: {text[:200]}...\n"
+            "2. Evidence Quality:\n"
+            "   - Source credibility assessment\n"
+            "   - Data verification\n"
+            "3. Research Context:\n"
+            "   - Field relevance\n"
+            "   - Current research status"
+        )
     elif "citations" in analysis_type.lower():
+        return (
+            "Citation Analysis:\n"
+            "1. Source Information:\n"
+            "   - Publication details\n"
+            "   - Author credentials\n"
+            "2. Reference Quality:\n"
+            "   - Academic standards\n"
+            "   - Citation format"
+        )
     else:
+        return (
+            "General Analysis:\n"
+            "1. Key Findings:\n"
+            f"   - Main points from content: {text[:200]}...\n"
+            "2. Supporting Evidence:\n"
+            "   - Data and examples\n"
+            "3. Practical Applications:\n"
+            "   - Real-world relevance\n"
+            "   - Implementation possibilities"
+        )
-    return "\n".join(points)
 
 class ResearchSystem:
     def __init__(self):
-        # System configuration
-        self.config = {
-            "max_research_depth": 3,
-            "min_sources": 2,
-            "format_style": "MLA",
-            "cache_results": True
-        }
-
-        # Initialize model with role specialization
         self.model = HfApiModel(
             model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
             custom_role_conversions={
                 "tool-call": "assistant",
-                "tool-response": "user"
-                "researcher": "assistant",
-                "formatter": "assistant"
+                "tool-response": "user"
             }
         )
 
-        # Research agent with enhanced capabilities
         self.researcher = CodeAgent(
             tools=[web_search, analyze_content],
             model=self.model
         )
 
-        # Formatting agent specializing in academic standards
         self.formatter = CodeAgent(
             tools=[],
             model=self.model
         )
-
-        # Result cache for performance
-        self.cache: Dict[str, Tuple[str, str]] = {}
 
     def create_interface(self):
-        """Creates an enhanced Gradio interface with advanced features"""
         with gr.Blocks(title="qResearch", theme=gr.themes.Soft()) as interface:
             gr.Markdown(
                 "# qResearch Pro\n"

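The refactored analyze_content now returns a fixed template per branch instead of building and joining a points list. As an illustrative sketch only (the sample text and prints are made up; the calls mirror how the new process_research_query below invokes the tools positionally):

# Illustrative sketch, not part of the commit: exercising the refactored branches.
sample_text = "Transformer models have reshaped natural language processing ..."  # made-up input
print(analyze_content(sample_text, "academic"))   # "Academic Analysis: ..." template
print(analyze_content(sample_text, "citations"))  # "Citation Analysis: ..." template
print(analyze_content(sample_text, "general"))    # "General Analysis: ..." template
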
@@ -134,7 +111,7 @@ class ResearchSystem:
                     label="Research Process",
                     height=600,
                     show_label=True,
-                    type="messages"
+                    type="messages"
                 )
 
                 with gr.Column(scale=1):

@@ -178,7 +155,6 @@ class ResearchSystem:
                 outputs=[chat]
             )
 
-            # Clear chat using the proper method
             def clear_chat():
                 return None
 

@@ -187,7 +163,6 @@ class ResearchSystem:
                 outputs=[chat]
             )
 
-            # Help information
             with gr.Accordion("Usage Guide", open=False):
                 gr.Markdown("""
                 ### How to Use qResearch Pro

@@ -206,56 +181,41 @@ class ResearchSystem:
                                query: str,
                                depth: int = 3,
                                num_sources: int = 5) -> List[Dict[str, str]]:
-        """
-        Processes a research query with enhanced capabilities
-
-        Args:
-            query: The research query
-            depth: Desired research depth (1-5)
-            num_sources: Number of sources to include (1-10)
-
-        Returns:
-            List of message dictionaries for the chatbot interface
-        """
+        """Processes a research query with enhanced capabilities"""
         try:
-            return [
-                {"role": "user", "content": query},
-                {"role": "assistant", "content": f"📚 Research Findings:\n{raw_research}"},
-                {"role": "assistant", "content": f"📝 MLA Formatted:\n{formatted}"}
-            ]
+            # Initial broad search
+            initial_results = web_search(query, max_results=num_sources)
+            initial_analysis = analyze_content(initial_results, "general")
 
+            # Focused research on key aspects
+            aspects = [
+                "overview",
+                "detailed analysis",
+                "current developments",
+                "expert opinions",
+                "practical applications"
+            ][:depth]
 
+            detailed_research = []
+            for aspect in aspects:
+                aspect_query = f"{query} {aspect}"
+                search_results = web_search(aspect_query, max_results=2)
+                aspect_analysis = analyze_content(search_results, "academic")
+                detailed_research.append(f"=== {aspect.upper()} ===\n{aspect_analysis}\n")
 
+            # Compile research findings
+            raw_research = (
+                f"INITIAL OVERVIEW:\n{initial_analysis}\n\n"
+                f"DETAILED RESEARCH:\n{''.join(detailed_research)}"
+            )
+
+            # Format in MLA style
             format_prompt = (
-                "2. Academic formatting\n"
-                "3. Bibliography\n\n"
-                f"Content to format:\n{raw_research}"
+                "Format this research in MLA style:\n"
+                f"{raw_research}"
             )
             formatted = self.formatter.run(format_prompt)
 
-            # Cache results
-            if self.config["cache_results"]:
-                self.cache[query] = (raw_research, formatted)
-
-            # Return results in new message format
             return [
                 {"role": "user", "content": query},
                 {"role": "assistant", "content": f"📚 Research Findings:\n{raw_research}"},

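For context, the list returned above is the chat history format that gr.Chatbot(type="messages") consumes: plain dictionaries with role and content keys. A minimal sketch, with made-up values:

# Minimal sketch (made-up values) of the history fed to the messages-type Chatbot.
example_history = [
    {"role": "user", "content": "history of quantum error correction"},
    {"role": "assistant", "content": "📚 Research Findings:\n..."},
    {"role": "assistant", "content": "📝 MLA Formatted:\n..."},
]
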
@@ -273,6 +233,6 @@ if __name__ == "__main__":
     system = ResearchSystem()
     system.create_interface().launch(
         server_port=7860,
-        share=True,
-        show_api=False
+        share=True,
+        show_api=False
     )