0r0b0r0s committed (verified)
Commit 3378ba7 · 1 Parent(s): 49bf09e

Update app.py

Files changed (1)
  1. app.py +15 -33
app.py CHANGED
@@ -18,51 +18,33 @@ from huggingface_hub import InferenceClient, login

 class BasicAgent:
     def __init__(self):
-        # Initialize with GAIA-proven Qwen model
+        login(token=os.environ["HF_TOKEN"])  # Required for gated access
+
         self.client = InferenceClient(
-            model="Qwen/Qwen2-72B-Instruct",
+            model="Qwen/Qwen2-7B-Instruct",
             token=os.environ["HF_TOKEN"],
             timeout=120
         )

-        # Verify model access
-        test_response = self._call_model("2+2=")
-        if not test_response.startswith("4"):
-            raise RuntimeError("Model initialization failed")
-
-    def _call_model(self, question: str) -> str:
-        """Core model call with GAIA-optimized prompt"""
-        prompt = f"""<|im_start|>system
-Answer with ONLY the exact value requested, no explanations. Follow GAIA format strictly.<|im_end|>
+    def __call__(self, question: str) -> str:
+        try:
+            prompt = f"""<|im_start|>system
+Answer with ONLY the exact value requested.<|im_end|>
 <|im_start|>user
 {question}<|im_end|>
 <|im_start|>assistant
 """
-        return self.client.text_generation(
-            prompt=prompt,
-            temperature=0.05,
-            max_new_tokens=100,
-            stop_sequences=["<|im_end|>"],
-            repetition_penalty=1.1
-        )
-
-    def __call__(self, question: str) -> str:
-        try:
-            # Get raw model response
-            raw_response = self._call_model(question)
-
-            # Strict GAIA-compliant extraction
-            answer = raw_response.split("<|im_start|>assistant")[-1]
-            answer = answer.split("<|im_end|>")[0].strip()
-
-            # Normalization for exact matching
-            return re.sub(r'[^a-zA-Z0-9, ]', '', answer).lower()
-        except Exception as e:
-            print(f"Error: {str(e)}")
+            response = self.client.text_generation(
+                prompt=prompt,
+                temperature=0.01,
+                max_new_tokens=50,
+                stop_sequences=["<|im_end|>"]
+            )
+            return response.split("<|im_start|>assistant")[-1].split("<|im_end|>")[0].strip()
+        except Exception:
             return ""


-
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
     Fetches all questions, runs the BasicAgent on them, submits all answers,
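
As a quick sanity check of the updated agent, a minimal local driver could look like the sketch below. This is only an illustration, not part of the commit: it assumes HF_TOKEN is exported, that app.py can be imported without immediately launching the Gradio UI, and the sample question is just a placeholder.

    import os

    # Hypothetical smoke test; assumes HF_TOKEN is set and app.py is importable.
    assert "HF_TOKEN" in os.environ, "export HF_TOKEN first"

    from app import BasicAgent

    agent = BasicAgent()            # calls login() and builds the InferenceClient for Qwen2-7B-Instruct
    print(agent("What is 2 + 2?"))  # placeholder question; an empty string means the inference call raised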