PLBot committed on
Commit 382c69f · verified · 1 Parent(s): ca2229d

switch to Gemini API key

Files changed (1)
  1. app.py +17 -7
app.py CHANGED
@@ -6,6 +6,8 @@ import yaml
 from tools.final_answer import FinalAnswerTool
 from Gradio_UI import GradioUI
 from smolagents.agent_types import AgentImage
+import os
+from smolagents import LiteLLMModel
 
 # Import tools
 from tools.final_answer import FinalAnswerTool
@@ -42,13 +44,21 @@ def get_current_time_in_timezone(timezone: str) -> str:
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
 # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
-model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
-    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
-    # model_id='deepseek-ai/deepseek-coder-33B-instruct',
-    custom_role_conversions=None,
+# model = HfApiModel(
+#     max_tokens=2096,
+#     temperature=0.5,
+#     # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
+#     model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
+#     # model_id='deepseek-ai/deepseek-coder-33B-instruct',
+#     custom_role_conversions=None,
+# )
+
+# Use LLM API directly
+model = LiteLLMModel(
+    model="gemini-1.5-pro",
+    api_key=os.environ.get("GEMINI_API_KEY"),
+    max_tokens=2096,
+    temperature=0.5
 )
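For context, a minimal sketch of how a Gemini-backed LiteLLMModel is typically wired into a smolagents CodeAgent and the Gradio UI used by this Space. This wiring is not part of the diff and is an assumption based on the standard agents-course template; note that the sketch uses LiteLLMModel's documented model_id parameter, whereas the committed code passes model.

# Sketch only; assumes the standard smolagents CodeAgent template used by this
# Space. Only the model configuration values come from the diff above.
import os

from smolagents import CodeAgent, LiteLLMModel
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

# GEMINI_API_KEY must be set in the environment (e.g. as a Space secret).
model = LiteLLMModel(
    model_id="gemini-1.5-pro",   # documented parameter name in smolagents
    api_key=os.environ.get("GEMINI_API_KEY"),
    max_tokens=2096,
    temperature=0.5,
)

agent = CodeAgent(
    tools=[FinalAnswerTool()],   # assumption: tool list mirrors the template
    model=model,
    max_steps=6,                 # assumption: template default
)

GradioUI(agent).launch()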