Maximofn committed on
Commit 2aa9e04 · 1 Parent(s): b813d0f

Update the model initialization to InferenceClient and add the InferenceClient import from huggingface_hub

Files changed (1)
1. app.py +11 -6
app.py CHANGED
@@ -1,4 +1,5 @@
 from smolagents import CodeAgent, HfApiModel, load_tool, tool
+from huggingface_hub import InferenceClient
 import datetime
 import requests
 import pytz
@@ -79,12 +80,16 @@ web_searcher = DuckDuckGoSearchTool(max_results=5)
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
 # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
-model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
-    custom_role_conversions=None,
-    token=HF_TOKEN
+# model = HfApiModel(
+#     max_tokens=2096,
+#     temperature=0.5,
+#     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
+#     custom_role_conversions=None,
+#     token=HF_TOKEN
+# )
+model = InferenceClient(
+    model="Qwen/Qwen2.5-Coder-32B-Instruct",
+    api_key=HF_TOKEN
 )
 
 
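For context, below is a minimal sketch of how a client initialized this way can be queried directly. It assumes huggingface_hub's chat_completion method; the environment-variable lookup and the example prompt are illustrative and not part of the commit.

import os

from huggingface_hub import InferenceClient

# HF_TOKEN is read from the environment here to keep the sketch self-contained;
# in app.py it is assumed to be defined earlier in the file.
HF_TOKEN = os.environ.get("HF_TOKEN")

client = InferenceClient(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    api_key=HF_TOKEN,
)

# chat_completion sends an OpenAI-style message list to the Hugging Face
# Inference API and returns a ChatCompletionOutput.
response = client.chat_completion(
    messages=[{"role": "user", "content": "Write a one-line Python function that reverses a string."}],
    max_tokens=256,
    temperature=0.5,
)
print(response.choices[0].message.content)

Note that InferenceClient is the lower-level huggingface_hub client, so generation parameters such as max_tokens and temperature move from the constructor (as they were with HfApiModel) to each call.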