m-ric (HF Staff) committed
Commit 609c341 · verified · 1 Parent(s): 90438ac

Update app.py

Files changed (1)
  1. app.py +185 -102
app.py CHANGED
@@ -1,23 +1,27 @@
-import argparse
 import json
+import mimetypes
 import os
+import re
+import shutil
 import threading
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from datetime import datetime
-from pathlib import Path
-from typing import List, Optional
+from typing import Optional
 
-import datasets
-import pandas as pd
+import gradio as gr
 from dotenv import load_dotenv
 from huggingface_hub import login
-import gradio as gr
-
-from scripts.reformulator import prepare_response
-from scripts.run_agents import (
-    get_single_file_description,
-    get_zip_description,
+from smolagents import (
+    CodeAgent,
+    HfApiModel,
+    Tool,
 )
+from smolagents.agent_types import (
+    AgentAudio,
+    AgentImage,
+    AgentText,
+    handle_agent_output_types,
+)
+from smolagents.gradio_ui import pull_messages_from_step
+
 from scripts.text_inspector_tool import TextInspectorTool
 from scripts.text_web_browser import (
     ArchiveSearchTool,
@@ -29,19 +33,6 @@ from scripts.text_web_browser import (
     VisitTool,
 )
 from scripts.visual_qa import visualizer
-from tqdm import tqdm
-
-from smolagents import (
-    CodeAgent,
-    HfApiModel,
-    LiteLLMModel,
-    Model,
-    ToolCallingAgent,
-)
-from smolagents.agent_types import AgentText, AgentImage, AgentAudio
-from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
-
-from smolagents import Tool
 
 
 class GoogleSearchTool(Tool):
@@ -67,7 +58,9 @@ class GoogleSearchTool(Tool):
         import requests
 
         if self.serpapi_key is None:
-            raise ValueError("Missing SerpAPI key. Make sure you have 'SERPER_API_KEY' in your env variables.")
+            raise ValueError(
+                "Missing SerpAPI key. Make sure you have 'SERPER_API_KEY' in your env variables."
+            )
 
         params = {
             "engine": "google",
@@ -76,16 +69,19 @@ class GoogleSearchTool(Tool):
             "google_domain": "google.com",
         }
 
-        headers = {
-            'X-API-KEY': self.serpapi_key,
-            'Content-Type': 'application/json'
-        }
+        headers = {"X-API-KEY": self.serpapi_key, "Content-Type": "application/json"}
 
         if filter_year is not None:
-            params["tbs"] = f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}"
-
-        response = requests.request("POST", "https://google.serper.dev/search", headers=headers, data=json.dumps(params))
-
+            params["tbs"] = (
+                f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}"
+            )
+
+        response = requests.request(
+            "POST",
+            "https://google.serper.dev/search",
+            headers=headers,
+            data=json.dumps(params),
+        )
 
         if response.status_code == 200:
             results = response.json()
@@ -99,9 +95,13 @@ class GoogleSearchTool(Tool):
                 f"No results found for query: '{query}' with filtering on year={filter_year}. Use a less restrictive query or do not filter on year."
             )
         else:
-            raise Exception(f"No results found for query: '{query}'. Use a less restrictive query.")
+            raise Exception(
+                f"No results found for query: '{query}'. Use a less restrictive query."
+            )
         if len(results["organic"]) == 0:
-            year_filter_message = f" with filter year={filter_year}" if filter_year is not None else ""
+            year_filter_message = (
+                f" with filter year={filter_year}" if filter_year is not None else ""
+            )
             return f"No results found for '{query}'{year_filter_message}. Try with a more general query, or remove the year filter."
 
         web_snippets = []
@@ -121,11 +121,14 @@ class GoogleSearchTool(Tool):
 
             redacted_version = f"{idx}. [{page['title']}]({page['link']}){date_published}{source}\n{snippet}"
 
-            redacted_version = redacted_version.replace("Your browser can't play this video.", "")
+            redacted_version = redacted_version.replace(
+                "Your browser can't play this video.", ""
+            )
             web_snippets.append(redacted_version)
 
         return "## Search Results\n" + "\n\n".join(web_snippets)
 
+
 # web_search = GoogleSearchTool()
 
 # print(web_search(query="Donald Trump news"))
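Note for readers trying the change locally: the commented-out smoke test above can also exercise the year filter that forward() supports; a hedged variant (requires SERPER_API_KEY in the environment, the query and year are only examples):

    # web_search = GoogleSearchTool()
    # print(web_search(query="Donald Trump news", filter_year=2024))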
@@ -198,6 +201,7 @@ WEB_TOOLS = [
     TextInspectorTool(model, text_limit),
 ]
 
+
 # Agent creation in a factory function
 def create_agent():
     """Creates a fresh agent instance for each session"""
@@ -210,8 +214,10 @@ def create_agent():
         planning_interval=4,
     )
 
+
 document_inspection_tool = TextInspectorTool(model, 20000)
 
+
 def stream_to_gradio(
     agent,
     task: str,
@@ -219,7 +225,9 @@ def stream_to_gradio(
     additional_args: Optional[dict] = None,
 ):
     """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+    for step_log in agent.run(
+        task, stream=True, reset=reset_agent_memory, additional_args=additional_args
+    ):
         for message in pull_messages_from_step(
             step_log,
         ):
@@ -244,14 +252,15 @@ def stream_to_gradio(
             content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
         )
     else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
+        yield gr.ChatMessage(
+            role="assistant", content=f"**Final answer:** {str(final_answer)}"
+        )
 
 
 class GradioUI:
     """A one-line interface to launch your agent in Gradio"""
 
     def __init__(self, file_upload_folder: str | None = None):
-
         self.file_upload_folder = file_upload_folder
         if self.file_upload_folder is not None:
             if not os.path.exists(file_upload_folder):
@@ -259,21 +268,23 @@ class GradioUI:
 
     def interact_with_agent(self, prompt, messages, session_state):
         # Get or create session-specific agent
-        if 'agent' not in session_state:
-            session_state['agent'] = create_agent()
+        if "agent" not in session_state:
+            session_state["agent"] = create_agent()
 
         # Adding monitoring
         try:
             # log the existence of agent memory
-            has_memory = hasattr(session_state['agent'], 'memory')
+            has_memory = hasattr(session_state["agent"], "memory")
             print(f"Agent has memory: {has_memory}")
             if has_memory:
                 print(f"Memory type: {type(session_state['agent'].memory)}")
 
             messages.append(gr.ChatMessage(role="user", content=prompt))
             yield messages
 
-            for msg in stream_to_gradio(session_state['agent'], task=prompt, reset_agent_memory=False):
+            for msg in stream_to_gradio(
+                session_state["agent"], task=prompt, reset_agent_memory=False
+            ):
                 messages.append(msg)
                 yield messages
             yield messages
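The hunk above is the heart of the session-isolation change: the agent lives in a per-session gr.State dict and is created lazily on the first message. A stripped-down sketch of the same pattern (component names are illustrative, not the app's exact wiring; create_agent refers to the factory shown earlier in this diff):

    import gradio as gr

    def respond(prompt, history, session_state):
        # One agent per browser session, created on first use
        if "agent" not in session_state:
            session_state["agent"] = create_agent()  # factory from app.py
        history = history + [gr.ChatMessage(role="user", content=prompt)]
        return history

    with gr.Blocks() as demo:
        session_state = gr.State({})  # each visitor gets their own copy of this dict
        chatbot = gr.Chatbot(type="messages")
        prompt_box = gr.Textbox()
        prompt_box.submit(respond, [prompt_box, chatbot, session_state], [chatbot])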
@@ -322,10 +333,14 @@ class GradioUI:
         sanitized_name = "".join(sanitized_name)
 
         # Save the uploaded file to the specified folder
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
+        file_path = os.path.join(
+            self.file_upload_folder, os.path.basename(sanitized_name)
+        )
         shutil.copy(file.name, file_path)
 
-        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
+        return gr.Textbox(
+            f"File uploaded: {file_path}", visible=True
+        ), file_uploads_log + [file_path]
 
     def log_user_message(self, text_input, file_uploads_log):
         return (
@@ -335,49 +350,55 @@ class GradioUI:
                 if len(file_uploads_log) > 0
                 else ""
             ),
-            gr.Textbox(value="", interactive=False, placeholder="Please wait while Steps are getting populated"),
-            gr.Button(interactive=False)
+            gr.Textbox(
+                value="",
+                interactive=False,
+                placeholder="Please wait while Steps are getting populated",
+            ),
+            gr.Button(interactive=False),
         )
 
     def detect_device(self, request: gr.Request):
         # Check whether the user device is a mobile or a computer
 
         if not request:
             return "Unknown device"
         # Method 1: Check sec-ch-ua-mobile header
-        is_mobile_header = request.headers.get('sec-ch-ua-mobile')
+        is_mobile_header = request.headers.get("sec-ch-ua-mobile")
         if is_mobile_header:
-            return "Mobile" if '?1' in is_mobile_header else "Desktop"
+            return "Mobile" if "?1" in is_mobile_header else "Desktop"
 
         # Method 2: Check user-agent string
-        user_agent = request.headers.get('user-agent', '').lower()
-        mobile_keywords = ['android', 'iphone', 'ipad', 'mobile', 'phone']
+        user_agent = request.headers.get("user-agent", "").lower()
+        mobile_keywords = ["android", "iphone", "ipad", "mobile", "phone"]
 
         if any(keyword in user_agent for keyword in mobile_keywords):
             return "Mobile"
 
         # Method 3: Check platform
-        platform = request.headers.get('sec-ch-ua-platform', '').lower()
+        platform = request.headers.get("sec-ch-ua-platform", "").lower()
         if platform:
             if platform in ['"android"', '"ios"']:
                 return "Mobile"
             elif platform in ['"windows"', '"macos"', '"linux"']:
                 return "Desktop"
 
         # Default case if no clear indicators
         return "Desktop"
-
-    def launch(self, **kwargs):
 
+    def launch(self, **kwargs):
         with gr.Blocks(theme="ocean", fill_height=True) as demo:
             # Different layouts for mobile and computer devices
             @gr.render()
             def layout(request: gr.Request):
                 device = self.detect_device(request)
                 print(f"device - {device}")
                 # Render layout with sidebar
                 if device == "Desktop":
-                    with gr.Blocks(fill_height=True,) as sidebar_demo:
+                    with gr.Blocks(
+                        fill_height=True,
+                    ):
+                        file_uploads_log = gr.State([])
                         with gr.Sidebar():
                             gr.Markdown("""# open Deep Research - free the AI agents!
 
@@ -388,30 +409,42 @@ class GradioUI:
                             You can try a simplified version here (uses `Qwen-Coder-32B` instead of `o1`, so much less powerful than the original open-Deep-Research).<br><br>""")
                             with gr.Group():
                                 gr.Markdown("**Your request**", container=True)
-                                text_input = gr.Textbox(lines=3, label="Your request", container=False, placeholder="Enter your prompt here and press Shift+Enter or press the button")
-                                launch_research_btn = gr.Button("Run", variant="primary")
+                                text_input = gr.Textbox(
+                                    lines=3,
+                                    label="Your request",
+                                    container=False,
+                                    placeholder="Enter your prompt here and press Shift+Enter or press the button",
+                                )
+                                launch_research_btn = gr.Button(
+                                    "Run", variant="primary"
+                                )
 
                             # If an upload folder is provided, enable the upload feature
                             if self.file_upload_folder is not None:
                                 upload_file = gr.File(label="Upload a file")
-                                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
+                                upload_status = gr.Textbox(
+                                    label="Upload Status",
+                                    interactive=False,
+                                    visible=False,
+                                )
                                 upload_file.change(
                                     self.upload_file,
                                     [upload_file, file_uploads_log],
                                     [upload_status, file_uploads_log],
                                 )
 
                             gr.HTML("<br><br><h4><center>Powered by:</center></h4>")
                             with gr.Row():
                                 gr.HTML("""<div style="display: flex; align-items: center; gap: 8px; font-family: system-ui, -apple-system, sans-serif;">
                             <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png" style="width: 32px; height: 32px; object-fit: contain;" alt="logo">
                             <a target="_blank" href="https://github.com/huggingface/smolagents"><b>huggingface/smolagents</b></a>
                             </div>""")
 
                         # Add session state to store session-specific data
-                        session_state = gr.State({}) # Initialize empty state for each session
+                        session_state = gr.State(
+                            {}
+                        ) # Initialize empty state for each session
                         stored_messages = gr.State([])
-                        file_uploads_log = gr.State([])
                         chatbot = gr.Chatbot(
                             label="open-Deep-Research",
                             type="messages",
@@ -421,35 +454,55 @@ class GradioUI:
                             ),
                             resizeable=False,
                             scale=1,
-                            elem_id="my-chatbot"
+                            elem_id="my-chatbot",
                         )
 
                         text_input.submit(
                             self.log_user_message,
                             [text_input, file_uploads_log],
                             [stored_messages, text_input, launch_research_btn],
-                        ).then(self.interact_with_agent,
+                        ).then(
+                            self.interact_with_agent,
                             # Include session_state in function calls
                             [stored_messages, chatbot, session_state],
-                            [chatbot]
-                        ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
-                            None,
-                            [text_input, launch_research_btn])
+                            [chatbot],
+                        ).then(
+                            lambda: (
+                                gr.Textbox(
+                                    interactive=True,
+                                    placeholder="Enter your prompt here and press the button",
+                                ),
+                                gr.Button(interactive=True),
+                            ),
+                            None,
+                            [text_input, launch_research_btn],
+                        )
                         launch_research_btn.click(
                             self.log_user_message,
                             [text_input, file_uploads_log],
                             [stored_messages, text_input, launch_research_btn],
-                        ).then(self.interact_with_agent,
+                        ).then(
+                            self.interact_with_agent,
                             # Include session_state in function calls
                             [stored_messages, chatbot, session_state],
-                            [chatbot]
-                        ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
-                            None,
-                            [text_input, launch_research_btn])
+                            [chatbot],
+                        ).then(
+                            lambda: (
+                                gr.Textbox(
+                                    interactive=True,
+                                    placeholder="Enter your prompt here and press the button",
+                                ),
+                                gr.Button(interactive=True),
+                            ),
+                            None,
+                            [text_input, launch_research_btn],
+                        )
 
                 # Render simple layout
                 else:
-                    with gr.Blocks(fill_height=True,) as simple_demo:
+                    with gr.Blocks(
+                        fill_height=True,
+                    ):
                         gr.Markdown("""# open Deep Research - free the AI agents!
                         _Built with [smolagents](https://github.com/huggingface/smolagents)_
 
@@ -459,7 +512,9 @@ class GradioUI:
 
                         You can try a simplified version below (uses `Qwen-Coder-32B` instead of `o1`, so much less powerful than the original open-Deep-Research)👇""")
                         # Add session state to store session-specific data
-                        session_state = gr.State({}) # Initialize empty state for each session
+                        session_state = gr.State(
+                            {}
+                        ) # Initialize empty state for each session
                         stored_messages = gr.State([])
                         file_uploads_log = gr.State([])
                         chatbot = gr.Chatbot(
@@ -475,38 +530,66 @@ class GradioUI:
                         # If an upload folder is provided, enable the upload feature
                         if self.file_upload_folder is not None:
                             upload_file = gr.File(label="Upload a file")
-                            upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
+                            upload_status = gr.Textbox(
+                                label="Upload Status", interactive=False, visible=False
+                            )
                             upload_file.change(
                                 self.upload_file,
                                 [upload_file, file_uploads_log],
                                 [upload_status, file_uploads_log],
                             )
-                        text_input = gr.Textbox(lines=1, label="Your request", placeholder="Enter your prompt here and press the button")
-                        launch_research_btn = gr.Button("Run", variant="primary",)
+                        text_input = gr.Textbox(
+                            lines=1,
+                            label="Your request",
+                            placeholder="Enter your prompt here and press the button",
+                        )
+                        launch_research_btn = gr.Button(
+                            "Run",
+                            variant="primary",
+                        )
 
                         text_input.submit(
                             self.log_user_message,
                             [text_input, file_uploads_log],
                             [stored_messages, text_input, launch_research_btn],
-                        ).then(self.interact_with_agent,
+                        ).then(
+                            self.interact_with_agent,
                             # Include session_state in function calls
                             [stored_messages, chatbot, session_state],
-                            [chatbot]
-                        ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
-                            None,
-                            [text_input, launch_research_btn])
+                            [chatbot],
+                        ).then(
+                            lambda: (
+                                gr.Textbox(
+                                    interactive=True,
+                                    placeholder="Enter your prompt here and press the button",
+                                ),
+                                gr.Button(interactive=True),
+                            ),
+                            None,
+                            [text_input, launch_research_btn],
+                        )
                         launch_research_btn.click(
                             self.log_user_message,
                             [text_input, file_uploads_log],
                             [stored_messages, text_input, launch_research_btn],
-                        ).then(self.interact_with_agent,
+                        ).then(
+                            self.interact_with_agent,
                             # Include session_state in function calls
                             [stored_messages, chatbot, session_state],
-                            [chatbot]
-                        ).then(lambda : (gr.Textbox(interactive=True, placeholder="Enter your prompt here and press the button"), gr.Button(interactive=True)),
-                            None,
-                            [text_input, launch_research_btn])
+                            [chatbot],
+                        ).then(
+                            lambda: (
+                                gr.Textbox(
+                                    interactive=True,
+                                    placeholder="Enter your prompt here and press the button",
+                                ),
+                                gr.Button(interactive=True),
+                            ),
+                            None,
+                            [text_input, launch_research_btn],
+                        )
 
         demo.launch(debug=True, **kwargs)
 
+
 GradioUI().launch()
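A small usage note on the final call: the __init__ shown in this diff already accepts an upload directory and creates it if missing, so a variant that enables the file-upload widget would be (the path is only an example, not something this commit sets):

    GradioUI(file_upload_folder="./uploads").launch()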
 
 