Quazim0t0 commited on
Commit
eb339cc
·
verified ·
1 Parent(s): 15e06d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -436
app.py CHANGED
@@ -1,3 +1,9 @@
 
 
 
 
 
 
1
  import argparse
2
  import json
3
  import os
@@ -34,480 +40,111 @@ from tqdm import tqdm
34
  from smolagents import (
35
  CodeAgent,
36
  HfApiModel,
37
- LiteLLMModel,
38
- Model,
39
  ToolCallingAgent,
40
  )
41
  from smolagents.agent_types import AgentText, AgentImage, AgentAudio
42
  from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
43
-
44
  from smolagents import Tool
45
 
46
-
47
- class GoogleSearchTool(Tool):
48
- name = "web_search"
49
- description = """Performs a google web search for your query then returns a string of the top search results."""
50
- inputs = {
51
- "query": {"type": "string", "description": "The search query to perform."},
52
- "filter_year": {
53
- "type": "integer",
54
- "description": "Optionally restrict results to a certain year",
55
- "nullable": True,
56
- },
57
- }
58
- output_type = "string"
59
-
60
- def __init__(self):
61
- super().__init__(self)
62
- import os
63
-
64
- self.serpapi_key = os.getenv("SERPER_API_KEY")
65
-
66
- def forward(self, query: str, filter_year: Optional[int] = None) -> str:
67
- import requests
68
-
69
- if self.serpapi_key is None:
70
- raise ValueError("Missing SerpAPI key. Make sure you have 'SERPER_API_KEY' in your env variables.")
71
-
72
- params = {
73
- "engine": "google",
74
- "q": query,
75
- "api_key": self.serpapi_key,
76
- "google_domain": "google.com",
77
- }
78
-
79
- headers = {
80
- 'X-API-KEY': self.serpapi_key,
81
- 'Content-Type': 'application/json'
82
- }
83
-
84
- if filter_year is not None:
85
- params["tbs"] = f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}"
86
-
87
- response = requests.request("POST", "https://google.serper.dev/search", headers=headers, data=json.dumps(params))
88
-
89
-
90
- if response.status_code == 200:
91
- results = response.json()
92
- else:
93
- raise ValueError(response.json())
94
-
95
- if "organic" not in results.keys():
96
- print("REZZZ", results.keys())
97
- if filter_year is not None:
98
- raise Exception(
99
- f"No results found for query: '{query}' with filtering on year={filter_year}. Use a less restrictive query or do not filter on year."
100
- )
101
- else:
102
- raise Exception(f"No results found for query: '{query}'. Use a less restrictive query.")
103
- if len(results["organic"]) == 0:
104
- year_filter_message = f" with filter year={filter_year}" if filter_year is not None else ""
105
- return f"No results found for '{query}'{year_filter_message}. Try with a more general query, or remove the year filter."
106
-
107
- web_snippets = []
108
- if "organic" in results:
109
- for idx, page in enumerate(results["organic"]):
110
- date_published = ""
111
- if "date" in page:
112
- date_published = "\nDate published: " + page["date"]
113
-
114
- source = ""
115
- if "source" in page:
116
- source = "\nSource: " + page["source"]
117
-
118
- snippet = ""
119
- if "snippet" in page:
120
- snippet = "\n" + page["snippet"]
121
-
122
- redacted_version = f"{idx}. [{page['title']}]({page['link']}){date_published}{source}\n{snippet}"
123
-
124
- redacted_version = redacted_version.replace("Your browser can't play this video.", "")
125
- web_snippets.append(redacted_version)
126
-
127
- return "## Search Results\n" + "\n\n".join(web_snippets)
128
-
129
- # web_search = GoogleSearchTool()
130
-
131
- # print(web_search(query="Donald Trump news"))
132
- # quit()
133
- AUTHORIZED_IMPORTS = [
134
- "requests",
135
- "zipfile",
136
- "os",
137
- "pandas",
138
- "numpy",
139
- "sympy",
140
- "json",
141
- "bs4",
142
- "pubchempy",
143
- "xml",
144
- "yahoo_finance",
145
- "Bio",
146
- "sklearn",
147
- "scipy",
148
- "pydub",
149
- "io",
150
- "PIL",
151
- "chess",
152
- "PyPDF2",
153
- "pptx",
154
- "torch",
155
- "datetime",
156
- "fractions",
157
- "csv",
158
- ]
159
  load_dotenv(override=True)
160
  login(os.getenv("HF_TOKEN"))
161
 
162
- append_answer_lock = threading.Lock()
163
-
164
  custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}
165
 
166
- user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"
167
-
168
- BROWSER_CONFIG = {
169
- "viewport_size": 1024 * 5,
170
- "downloads_folder": "downloads_folder",
171
- "request_kwargs": {
172
- "headers": {"User-Agent": user_agent},
173
- "timeout": 300,
174
- },
175
- "serpapi_key": os.getenv("SERPAPI_API_KEY"),
176
- }
177
-
178
- os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)
179
-
180
- model = LiteLLMModel(
181
- "gpt-4o",
182
- custom_role_conversions=custom_role_conversions,
183
- api_key=os.getenv("OPENAI_API_KEY")
184
  )
185
 
186
- text_limit = 20000
187
- ti_tool = TextInspectorTool(model, text_limit)
188
-
189
- browser = SimpleTextBrowser(**BROWSER_CONFIG)
190
-
191
- WEB_TOOLS = [
192
- GoogleSearchTool(),
193
- VisitTool(browser),
194
- PageUpTool(browser),
195
- PageDownTool(browser),
196
- FinderTool(browser),
197
- FindNextTool(browser),
198
- ArchiveSearchTool(browser),
199
- TextInspectorTool(model, text_limit),
200
- ]
201
-
202
- # Agent creation in a factory function
203
- def create_agent():
204
- """Creates a fresh agent instance for each session"""
205
- return CodeAgent(
206
- model=model,
207
- tools=[visualizer] + WEB_TOOLS,
208
- max_steps=10,
209
- verbosity_level=1,
210
- additional_authorized_imports=AUTHORIZED_IMPORTS,
211
- planning_interval=4,
212
- )
213
-
214
- document_inspection_tool = TextInspectorTool(model, 20000)
215
-
216
- def stream_to_gradio(
217
- agent,
218
- task: str,
219
- reset_agent_memory: bool = False,
220
- additional_args: Optional[dict] = None,
221
- ):
222
- """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
223
- for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
224
- for message in pull_messages_from_step(
225
- step_log,
226
- ):
227
- yield message
228
 
229
- final_answer = step_log # Last log is the run's final_answer
230
- final_answer = handle_agent_output_types(final_answer)
231
 
232
- if isinstance(final_answer, AgentText):
233
- yield gr.ChatMessage(
234
- role="assistant",
235
- content=f"**Final answer:**\n{final_answer.to_string()}\n",
236
- )
237
- elif isinstance(final_answer, AgentImage):
238
- yield gr.ChatMessage(
239
- role="assistant",
240
- content={"path": final_answer.to_string(), "mime_type": "image/png"},
241
- )
242
- elif isinstance(final_answer, AgentAudio):
243
- yield gr.ChatMessage(
244
- role="assistant",
245
- content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
246
- )
247
- else:
248
- yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
249
 
 
 
 
 
 
250
 
251
- class GradioUI:
252
- """A one-line interface to launch your agent in Gradio"""
 
 
253
 
254
- def __init__(self, file_upload_folder: str | None = None):
255
-
256
- self.file_upload_folder = file_upload_folder
257
- if self.file_upload_folder is not None:
258
- if not os.path.exists(file_upload_folder):
259
- os.mkdir(file_upload_folder)
260
 
261
- def interact_with_agent(self, prompt, messages, session_state):
262
- # Get or create session-specific agent
263
- if 'agent' not in session_state:
264
- session_state['agent'] = create_agent()
265
 
266
- # Adding monitoring
 
 
267
  try:
268
- # log the existence of agent memory
269
- has_memory = hasattr(session_state['agent'], 'memory')
270
- print(f"Agent has memory: {has_memory}")
271
- if has_memory:
272
- print(f"Memory type: {type(session_state['agent'].memory)}")
273
-
274
  messages.append(gr.ChatMessage(role="user", content=prompt))
275
  yield messages
276
 
277
  for msg in stream_to_gradio(session_state['agent'], task=prompt, reset_agent_memory=False):
278
- messages.append(msg)
 
279
  yield messages
280
  yield messages
281
  except Exception as e:
282
  print(f"Error in interaction: {str(e)}")
283
  raise
284
 
285
- def upload_file(
286
- self,
287
- file,
288
- file_uploads_log,
289
- allowed_file_types=[
290
- "application/pdf",
291
- "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
292
- "text/plain",
293
- ],
294
- ):
295
- """
296
- Handle file uploads, default allowed types are .pdf, .docx, and .txt
297
- """
298
- if file is None:
299
- return gr.Textbox("No file uploaded", visible=True), file_uploads_log
300
-
301
- try:
302
- mime_type, _ = mimetypes.guess_type(file.name)
303
- except Exception as e:
304
- return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
305
-
306
- if mime_type not in allowed_file_types:
307
- return gr.Textbox("File type disallowed", visible=True), file_uploads_log
308
-
309
- # Sanitize file name
310
- original_name = os.path.basename(file.name)
311
- sanitized_name = re.sub(
312
- r"[^\w\-.]", "_", original_name
313
- ) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
314
-
315
- type_to_ext = {}
316
- for ext, t in mimetypes.types_map.items():
317
- if t not in type_to_ext:
318
- type_to_ext[t] = ext
319
-
320
- # Ensure the extension correlates to the mime type
321
- sanitized_name = sanitized_name.split(".")[:-1]
322
- sanitized_name.append("" + type_to_ext[mime_type])
323
- sanitized_name = "".join(sanitized_name)
324
-
325
- # Save the uploaded file to the specified folder
326
- file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
327
- shutil.copy(file.name, file_path)
328
-
329
- return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
330
-
331
- def log_user_message(self, text_input, file_uploads_log):
332
- return (
333
- text_input
334
- + (
335
- f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
336
- if len(file_uploads_log) > 0
337
- else ""
338
- ),
339
- gr.Textbox(value="", interactive=False, placeholder="Please wait while Steps are getting populated"),
340
- gr.Button(interactive=False)
341
- )
342
-
343
- def detect_device(self, request: gr.Request):
344
- # Check whether the user device is a mobile or a computer
345
-
346
- if not request:
347
- return "Unknown device"
348
- # Method 1: Check sec-ch-ua-mobile header
349
- is_mobile_header = request.headers.get('sec-ch-ua-mobile')
350
- if is_mobile_header:
351
- return "Mobile" if '?1' in is_mobile_header else "Desktop"
352
-
353
- # Method 2: Check user-agent string
354
- user_agent = request.headers.get('user-agent', '').lower()
355
- mobile_keywords = ['android', 'iphone', 'ipad', 'mobile', 'phone']
356
-
357
- if any(keyword in user_agent for keyword in mobile_keywords):
358
- return "Mobile"
359
-
360
- # Method 3: Check platform
361
- platform = request.headers.get('sec-ch-ua-platform', '').lower()
362
- if platform:
363
- if platform in ['"android"', '"ios"']:
364
- return "Mobile"
365
- elif platform in ['"windows"', '"macos"', '"linux"']:
366
- return "Desktop"
367
-
368
- # Default case if no clear indicators
369
- return "Desktop"
370
-
371
- def launch(self, **kwargs):
372
-
373
  with gr.Blocks(theme="ocean", fill_height=True) as demo:
374
- # Different layouts for mobile and computer devices
375
  @gr.render()
376
  def layout(request: gr.Request):
377
  device = self.detect_device(request)
378
- print(f"device - {device}")
379
- # Render layout with sidebar
380
  if device == "Desktop":
381
- with gr.Blocks(fill_height=True,) as sidebar_demo:
382
  with gr.Sidebar():
383
- gr.Markdown("""# open Deep Research - free the AI agents!
384
 
385
- OpenAI just published [Deep Research](https://openai.com/index/introducing-deep-research/), a very nice assistant that can perform deep searches on the web to answer user questions.
386
-
387
- However, their agent has a huge downside: it's not open. So we've started a 24-hour rush to replicate and open-source it. Our resulting [open-Deep-Research agent](https://github.com/huggingface/smolagents/tree/main/examples/open_deep_research) took the #1 rank of any open submission on the GAIA leaderboard! ✨
388
-
389
- You can try a simplified version here.<br><br>""")
390
- with gr.Group():
391
- gr.Markdown("**Your request**", container=True)
392
- text_input = gr.Textbox(lines=3, label="Your request", container=False, placeholder="Enter your prompt here and press Shift+Enter or press the button")
393
- launch_research_btn = gr.Button("Run", variant="primary")
394
-
395
- # If an upload folder is provided, enable the upload feature
396
- if self.file_upload_folder is not None:
397
- upload_file = gr.File(label="Upload a file")
398
- upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
399
- upload_file.change(
400
- self.upload_file,
401
- [upload_file, file_uploads_log],
402
- [upload_status, file_uploads_log],
403
- )
404
-
405
- gr.HTML("<br><br><h4><center>Powered by:</center></h4>")
406
- with gr.Row():
407
- gr.HTML("""<div style="display: flex; align-items: center; gap: 8px; font-family: system-ui, -apple-system, sans-serif;">
408
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png" style="width: 32px; height: 32px; object-fit: contain;" alt="logo">
409
- <a href="https://github.com/huggingface/smolagents"><b>huggingface/smolagents</b></a>
410
- </div>""")
411
-
412
- # Add session state to store session-specific data
413
- session_state = gr.State({}) # Initialize empty state for each session
414
- stored_messages = gr.State([])
415
- file_uploads_log = gr.State([])
416
- chatbot = gr.Chatbot(
417
- label="open-Deep-Research",
418
- type="messages",
419
- avatar_images=(
420
- None,
421
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
422
- ),
423
- resizeable=False,
424
- scale=1,
425
- elem_id="my-chatbot"
426
- )
427
-
428
- text_input.submit(
429
- self.log_user_message,
430
- [text_input, file_uploads_log],
431
- [stored_messages, text_input, launch_research_btn],
432
- ).then(self.interact_with_agent,
433
- # Include session_state in function calls
434
- [stored_messages, chatbot, session_state],
435
- [chatbot]
436
- ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
437
- None,
438
- [text_input, launch_research_btn])
439
- launch_research_btn.click(
440
- self.log_user_message,
441
- [text_input, file_uploads_log],
442
- [stored_messages, text_input, launch_research_btn],
443
- ).then(self.interact_with_agent,
444
- # Include session_state in function calls
445
- [stored_messages, chatbot, session_state],
446
- [chatbot]
447
- ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
448
- None,
449
- [text_input, launch_research_btn])
450
-
451
- # Render simple layout
452
  else:
453
- with gr.Blocks(fill_height=True,) as simple_demo:
454
- gr.Markdown("""# open Deep Research - free the AI agents!
455
- _Built with [smolagents](https://github.com/huggingface/smolagents)_
456
-
457
- OpenAI just published [Deep Research](https://openai.com/index/introducing-deep-research/), a very nice assistant that can perform deep searches on the web to answer user questions.
458
-
459
- However, their agent has a huge downside: it's not open. So we've started a 24-hour rush to replicate and open-source it. Our resulting [open-Deep-Research agent](https://github.com/huggingface/smolagents/tree/main/examples/open_deep_research) took the #1 rank of any open submission on the GAIA leaderboard! ✨
460
-
461
- You can try a simplified version below. 👇""")
462
- # Add session state to store session-specific data
463
- session_state = gr.State({}) # Initialize empty state for each session
464
- stored_messages = gr.State([])
465
- file_uploads_log = gr.State([])
466
- chatbot = gr.Chatbot(
467
- label="open-Deep-Research",
468
- type="messages",
469
- avatar_images=(
470
- None,
471
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
472
- ),
473
- resizeable=True,
474
- scale=1,
475
- )
476
- # If an upload folder is provided, enable the upload feature
477
- if self.file_upload_folder is not None:
478
- upload_file = gr.File(label="Upload a file")
479
- upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
480
- upload_file.change(
481
- self.upload_file,
482
- [upload_file, file_uploads_log],
483
- [upload_status, file_uploads_log],
484
- )
485
- text_input = gr.Textbox(lines=1, label="Your request", placeholder="Enter your prompt here and press the button")
486
- launch_research_btn = gr.Button("Run", variant="primary",)
487
 
488
- text_input.submit(
489
- self.log_user_message,
490
- [text_input, file_uploads_log],
491
- [stored_messages, text_input, launch_research_btn],
492
- ).then(self.interact_with_agent,
493
- # Include session_state in function calls
494
- [stored_messages, chatbot, session_state],
495
- [chatbot]
496
- ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
497
- None,
498
- [text_input, launch_research_btn])
499
- launch_research_btn.click(
500
- self.log_user_message,
501
- [text_input, file_uploads_log],
502
- [stored_messages, text_input, launch_research_btn],
503
- ).then(self.interact_with_agent,
504
- # Include session_state in function calls
505
- [stored_messages, chatbot, session_state],
506
- [chatbot]
507
- ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
508
- None,
509
- [text_input, launch_research_btn])
510
-
511
- demo.launch(debug=True, **kwargs)
512
 
513
- GradioUI().launch()
 
 
 
 
1
+ """qResearch: Advanced AI Research Assistant
2
+
3
+ Modified implementation of deep research capabilities using Qwen2.5-Coder
4
+ and DuckDuckGo search integration.
5
+ """
6
+
7
  import argparse
8
  import json
9
  import os
 
40
  from smolagents import (
41
  CodeAgent,
42
  HfApiModel,
 
 
43
  ToolCallingAgent,
44
  )
45
  from smolagents.agent_types import AgentText, AgentImage, AgentAudio
46
  from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
 
47
  from smolagents import Tool
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  load_dotenv(override=True)
50
  login(os.getenv("HF_TOKEN"))
51
 
52
+ # Custom role conversions for model interaction
 
53
  custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}
54
 
55
+ # Initialize Qwen2.5-Coder-32B-Instruct model
56
+ model = HfApiModel(
57
+ model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
58
+ custom_role_conversions=custom_role_conversions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  )
60
 
61
class DuckDuckGoSearchTool(Tool):
    """Web search tool using DuckDuckGo's search API.

    Returns a markdown-formatted "## Search Results" section listing the
    top hits for a query, or a short notice when nothing was found.
    """
    name = "web_search"
    description = "Performs web searches using DuckDuckGo's search engine"
    inputs = {
        "query": {"type": "string", "description": "Search query text"},
        # "nullable": True marks the argument as optional in the smolagents
        # tool schema, matching the default value in forward().
        "max_results": {"type": "integer", "description": "Number of results to return", "default": 5, "nullable": True}
    }
    output_type = "string"

    def forward(self, query: str, max_results: int = 5) -> str:
        """Run the search and format the results as markdown.

        Args:
            query: Free-text search query.
            max_results: Maximum number of results to include.

        Returns:
            A "## Search Results" markdown section; if the search produced
            no hits, a human-readable notice instead of an empty section.
        """
        # Imported lazily so the module loads even when duckduckgo_search
        # is only needed at invocation time.
        from duckduckgo_search import DDGS

        with DDGS() as ddgs:
            results = list(ddgs.text(query, max_results=max_results))

        # Bug fix: the previous version returned a bare "## Search Results"
        # header for an empty result set, which reads as an empty answer.
        if not results:
            return f"No results found for '{query}'. Try a more general query."

        search_results = [
            f"{idx}. [{result['title']}]({result['href']})\n{result['body']}"
            for idx, result in enumerate(results, start=1)
        ]
        return "## Search Results\n" + "\n\n".join(search_results)
 
 
 
 
 
 
 
84
 
85
# Initialize research agent with DuckDuckGo tool.
# NOTE(review): this module-level agent is created here, but
# ResearchInterface.interact_with_agent reads session_state['agent'],
# which nothing in this file populates — confirm the intended wiring
# (either store this agent into each session or build one per session).
agent = CodeAgent(
    tools=[DuckDuckGoSearchTool()],
    model=model
)
90
 
91
class ResearchInterface:
    """Gradio front-end for the qResearch agent.

    Builds a device-aware (desktop vs. mobile) chat UI and appends an
    MLA-style attribution note to the agent's text answers.
    """

    def __init__(self):
        # Folder for user-uploaded files; created eagerly so later
        # uploads never race on a missing directory.
        self.file_upload_folder = "uploaded_files"
        os.makedirs(self.file_upload_folder, exist_ok=True)

    def detect_device(self, request: "gr.Request"):
        """Classify the client as "Mobile" or "Desktop" from its user-agent."""
        user_agent = request.headers.get("user-agent", "").lower()
        return "Mobile" if ("mobile" in user_agent) else "Desktop"

    def format_mla_response(self, content: str) -> str:
        """Formats agent responses in MLA style (appends an attribution note)."""
        return f"{content}\n\n*Note: Research conducted using qResearch AI system (qResearch, 2024)*"

    def interact_with_agent(self, messages, chatbot, session_state):
        """Stream the agent's answer for a user prompt into the chat history.

        Args:
            messages: The user's prompt text (wired from a stored-messages state).
            chatbot: The chat-history list of gr.ChatMessage entries to extend.
            session_state: Per-session dict; lazily holds the agent instance.

        Yields:
            The updated chat history after each streamed agent step.

        Raises:
            Re-raises any exception from the agent run after logging it.
        """
        # stream_to_gradio was removed from this file; import the library
        # implementation locally so this method is self-contained.
        from smolagents.gradio_ui import stream_to_gradio

        try:
            # Bug fix: the previous body referenced an undefined `prompt`
            # and never populated session_state['agent'].
            prompt = messages
            if 'agent' not in session_state:
                session_state['agent'] = agent  # fall back to the module-level agent
            chatbot.append(gr.ChatMessage(role="user", content=prompt))
            yield chatbot

            for msg in stream_to_gradio(session_state['agent'], task=prompt, reset_agent_memory=False):
                # Bug fix: append chat messages, not bare strings — a
                # gr.Chatbot(type="messages") expects message objects.
                # Only plain-text content gets the MLA note; media payloads
                # (dict content) are passed through untouched.
                if isinstance(msg, gr.ChatMessage) and isinstance(msg.content, str):
                    msg.content = self.format_mla_response(msg.content)
                chatbot.append(msg)
                yield chatbot
            yield chatbot
        except Exception as e:
            print(f"Error in interaction: {str(e)}")
            raise

    def create_interface(self):
        """Build and return the Gradio Blocks app, choosing a layout per device."""
        with gr.Blocks(theme="ocean", fill_height=True) as demo:
            # Layout is rendered per-request so the device check can use
            # the incoming request headers.
            @gr.render()
            def layout(request: "gr.Request"):
                device = self.detect_device(request)
                if device == "Desktop":
                    with gr.Blocks(fill_height=True) as sidebar_demo:
                        with gr.Sidebar():
                            gr.Markdown("""# qResearch - Advanced AI Research System

Developed as an open-source alternative to proprietary research assistants.
""")
                        # Interface elements remain similar

                        with gr.Row():
                            gr.Markdown("<div style='text-align: center; width: 100%; margin-top: 20px'>"
                                        "Research conducted using qResearch AI system (qResearch, 2024)"
                                        "</div>")
                else:
                    # Mobile interface
                    with gr.Blocks(fill_height=True) as simple_demo:
                        gr.Markdown("""# qResearch Mobile
*Advanced research capabilities in your pocket*""")
                        # Mobile interface elements

        return demo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
 
147
if __name__ == "__main__":
    # Entry point: build the UI and start the Gradio server.
    ui = ResearchInterface()
    ui.create_interface().launch()