ffreemt committed on
Commit
872d67f
·
1 Parent(s): 3cde0a6

switch to OpenAIServerModel

Browse files
Files changed (2) hide show
  1. app.py +19 -12
  2. requirements.txt +2 -2
app.py CHANGED
@@ -12,8 +12,9 @@ from dotenv import load_dotenv
12
  # from huggingface_hub import login
13
  from smolagents import (
14
  CodeAgent,
15
- HfApiModel,
16
- LiteLLMModel,
 
17
  Tool,
18
  # GoogleSearchTool,
19
  DuckDuckGoSearchTool,
@@ -96,7 +97,8 @@ _ = "" if os.getenv("OPENAI_API_KEY") is None else os.getenv("OPENAI_API_KEY")[:
96
 
97
  if os.getenv("MODEL_ID") and os.getenv("OPENAI_API_BASE"):
98
  logger.debug(f"""using LiteLLMModel: {model_id=}, {os.getenv("OPENAI_API_BASE")=}, os.getenv("OPENAI_API_BASE")={_}""")
99
- model = LiteLLMModel(
 
100
  # "gpt-4o",
101
  # os.getenv("MODEL_ID", "gpt-4o-mini"),
102
  model_id,
@@ -286,13 +288,6 @@ class GradioUI:
286
  ):
287
  file_uploads_log = gr.State([])
288
  with gr.Sidebar():
289
- gr.Markdown("""# open Deep Research - free the AI agents!
290
-
291
- OpenAI just (February 2, 2025) published [Deep Research](https://openai.com/index/introducing-deep-research/), an amazing assistant that can perform deep searches on the web to answer user questions.
292
-
293
- However, their agent has a huge downside: it's not open. So we've started a 24-hour rush to replicate and open-source it. Our (Huggingface's) resulting [open-Deep-Research agent](https://github.com/huggingface/smolagents/tree/main/examples/open_deep_research) took the #1 rank of any open submission on the GAIA leaderboard! ✨
294
-
295
- You can try a simplified version here that uses `Qwen-Coder-32B` (via smolagnet.HfApiModel) instead of `o1`. Modified: if you set MODEL_ID, OPENAI_API_BASE and OPENAI_API_KEY in the .env or env vars (in hf space these can be set in settings, .env will override env vars), the correspoding model will be used. N.B. if you see errors, it might be because whatever quota is exceeded, clone/duplicate this space and plug in your own resources and run your own deep-research.<br><br>""")
296
  with gr.Group():
297
  gr.Markdown("**Your request**", container=True)
298
  text_input = gr.Textbox(
@@ -326,6 +321,15 @@ class GradioUI:
326
  <a target="_blank" href="https://github.com/huggingface/smolagents"><b>huggingface/smolagents</b></a>
327
  </div>""")
328
 
 
 
 
 
 
 
 
 
 
329
  # Add session state to store session-specific data
330
  session_state = gr.State(
331
  {}
@@ -477,5 +481,8 @@ class GradioUI:
477
 
478
  demo.launch(debug=True, **kwargs)
479
 
480
-
481
- GradioUI().launch()
 
 
 
 
12
  # from huggingface_hub import login
13
  from smolagents import (
14
  CodeAgent,
15
+ # HfApiModel,
16
+ # LiteLLMModel,
17
+ OpenAIServerModel,
18
  Tool,
19
  # GoogleSearchTool,
20
  DuckDuckGoSearchTool,
 
97
 
98
  if os.getenv("MODEL_ID") and os.getenv("OPENAI_API_BASE"):
99
  logger.debug(f"""using LiteLLMModel: {model_id=}, {os.getenv("OPENAI_API_BASE")=}, os.getenv("OPENAI_API_BASE")={_}""")
100
+ # model = LiteLLMModel(
101
+ model = OpenAIServerModel(
102
  # "gpt-4o",
103
  # os.getenv("MODEL_ID", "gpt-4o-mini"),
104
  model_id,
 
288
  ):
289
  file_uploads_log = gr.State([])
290
  with gr.Sidebar():
 
 
 
 
 
 
 
291
  with gr.Group():
292
  gr.Markdown("**Your request**", container=True)
293
  text_input = gr.Textbox(
 
321
  <a target="_blank" href="https://github.com/huggingface/smolagents"><b>huggingface/smolagents</b></a>
322
  </div>""")
323
 
324
+ # -----
325
+ gr.Markdown("""# open Deep Research - free the AI agents!
326
+
327
+ OpenAI just (February 2, 2025) published [Deep Research](https://openai.com/index/introducing-deep-research/), an amazing assistant that can perform deep searches on the web to answer user questions.
328
+
329
+ However, their agent has a huge downside: it's not open. So we've started a 24-hour rush to replicate and open-source it. Our (Huggingface's) resulting [open-Deep-Research agent](https://github.com/huggingface/smolagents/tree/main/examples/open_deep_research) took the #1 rank of any open submission on the GAIA leaderboard! ✨
330
+
331
+ You can try a simplified version here that uses `Qwen-Coder-32B` (via smolagents.HfApiModel) instead of `o1`. Modified: if you set MODEL_ID, OPENAI_API_BASE and OPENAI_API_KEY in the .env or env vars (in hf space these can be set in settings, .env will override env vars), the corresponding model will be used. N.B. if you see errors, it might be because whatever quota is exceeded, clone/duplicate this space and plug in your own resources and run your own deep-research.<br><br>""")
332
+
333
  # Add session state to store session-specific data
334
  session_state = gr.State(
335
  {}
 
481
 
482
  demo.launch(debug=True, **kwargs)
483
 
484
+ # can this fix ctrl-c no response? no
485
+ try:
486
+ GradioUI().launch()
487
+ except KeyboardInterrupt:
488
+ ...
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- smolagents[litellm]==1.14.0
2
  anthropic>=0.37.1
3
  beautifulsoup4>=4.12.3
4
  datasets>=2.21.0
@@ -38,7 +38,7 @@ python-pptx
38
  torch
39
  xlrd
40
  SpeechRecognition
41
- litellm>=1.67.2
42
 
43
  duckduckgo-search
44
  loguru
 
1
+ smolagents
2
  anthropic>=0.37.1
3
  beautifulsoup4>=4.12.3
4
  datasets>=2.21.0
 
38
  torch
39
  xlrd
40
  SpeechRecognition
41
+ # litellm>=1.67.2
42
 
43
  duckduckgo-search
44
  loguru