Revert the model initialization to HfApiModel and remove the InferenceClient import
app.py
CHANGED
@@ -1,5 +1,4 @@
 from smolagents import CodeAgent, HfApiModel, load_tool, tool
-from huggingface_hub import InferenceClient
 import datetime
 import requests
 import pytz
@@ -80,16 +79,12 @@ web_searcher = DuckDuckGoSearchTool(max_results=5)
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
 # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
-# model = HfApiModel(
-#     max_tokens=2096,
-#     temperature=0.5,
-#     model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
-#     custom_role_conversions=None,
-#     token=HF_TOKEN
-# )
-model = InferenceClient(
-    model="Qwen/Qwen2.5-Coder-32B-Instruct",
-    api_key=HF_TOKEN
+model = HfApiModel(
+    max_tokens=2096,
+    temperature=0.5,
+    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
+    custom_role_conversions=None,
+    token=HF_TOKEN
 )
 
 
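The hunk above keeps the comment recommending a fallback when the hosted Qwen2.5-Coder model is overloaded. A minimal sketch of that fallback, assuming the same smolagents HfApiModel/CodeAgent API already imported in app.py; the HF_TOKEN environment-variable lookup and the empty tools list are assumptions for illustration, not part of this commit:

import os

from smolagents import CodeAgent, HfApiModel

# Assumption: the Space exposes its access token as an HF_TOKEN secret/env var.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Same keyword arguments as the reverted initialization in this commit, but
# pointed at the dedicated endpoint named in the comment above instead of the
# hosted Qwen/Qwen2.5-Coder-32B-Instruct model.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    custom_role_conversions=None,
    token=HF_TOKEN,
)

# Hypothetical wiring into the agent; the real app.py passes its own tool list.
agent = CodeAgent(model=model, tools=[])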