Update app.py
app.py CHANGED
@@ -18,35 +18,45 @@ from huggingface_hub import InferenceClient, login
 
 class BasicAgent:
     def __init__(self):
-        # Required
-        login(token=os.environ["HF_TOKEN"])
+        login(token=os.environ["HF_TOKEN"])  # Required authentication
 
+        # Primary model (0.5B instruct)
         self.client = InferenceClient(
-            model="Qwen/Qwen2-
+            model="Qwen/Qwen2-0.5B-Instruct",  # 1.2GB, free-tier compatible
             token=os.environ["HF_TOKEN"],
-            timeout=
+            timeout=60
         )
 
         # Verify model access
-        test_response = self.
+        test_response = self._call_model("2+2=")
         if "4" not in test_response:
-            raise RuntimeError(
+            raise RuntimeError("Model initialization failed")
 
-    def
-
-
-Answer with ONLY the exact value requested.<|im_end|>
+    def _call_model(self, question: str) -> str:
+        """Optimized prompt engineering for GAIA"""
+        prompt = f"""<|im_start|>system
+Answer with ONLY the exact value requested. No explanations.<|im_end|>
 <|im_start|>user
 {question}<|im_end|>
 <|im_start|>assistant
 """
-
-
-
-
-
-
-
+        return self.client.text_generation(
+            prompt=prompt,
+            temperature=0.01,
+            max_new_tokens=50,
+            stop_sequences=["<|im_end|>"]
+        )
+
+    def __call__(self, question: str) -> str:
+        try:
+            raw_response = self._call_model(question)
+
+            # Robust answer extraction
+            answer = raw_response.split("<|im_start|>assistant")[-1]
+            answer = answer.split("<|im_end|>")[0].strip()
+
+            # GAIA-compliant normalization
+            return re.sub(r'[^a-zA-Z0-9]', '', answer).lower()
         except Exception as e:
             print(f"Error: {str(e)}")
             return ""
|