0r0b0r0s committed on
Commit
594a71c
·
verified ·
1 Parent(s): 40b70c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -14
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from huggingface_hub import InferenceClient
7
 
8
 
9
  # (Keep Constants as is)
@@ -12,24 +12,32 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
 
13
  # --- Basic Agent Definition ---
14
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
15
class BasicAgent:
    """Minimal GAIA agent that answers questions through the HF Inference API."""

    def __init__(self, hf_token: str):
        """Store an InferenceClient authenticated with *hf_token*.

        Args:
            hf_token: Hugging Face access token forwarded to the client;
                no validation is performed here.
        """
        print("Advanced Agent initialized.")
        self.client = InferenceClient(token=hf_token)

    def __call__(self, question: str) -> str:
        """Generate and return a raw text answer for a single GAIA question."""
        print(f"Processing question: {question[:50]}...")

        response = self.client.text_generation(
            prompt=f"GAIA Question: {question}\nAnswer:",
            # BUG FIX: original model id was "...-70B-Instruc" (missing the
            # trailing "t"), which is not a valid repo id on the Hub and
            # would make every inference call fail.
            model="meta-llama/Meta-Llama-3-70B-Instruct",
            temperature=0.3,
            max_new_tokens=512,
            repetition_penalty=1.1
        )

        print(f"Generated answer: {response[:50]}...")
        return response
 
 
 
 
 
 
 
 
 
33
 
34
  def run_and_submit_all( profile: gr.OAuthProfile | None):
35
  """
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from huggingface_hub import InferenceClient, login
7
 
8
 
9
  # (Keep Constants as is)
 
12
 
13
  # --- Basic Agent Definition ---
14
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
15
+ # --- GAIA-Optimized Agent Implementation ---
16
+ from huggingface_hub import InferenceClient, login
17
+
18
class BasicAgent:
    """GAIA agent backed by a hosted Llama-3-70B-Instruct endpoint."""

    def __init__(self, hf_token: str):
        """Authenticate against the Hub and build the inference client.

        Args:
            hf_token: Hugging Face access token used for both the login
                check and the inference client itself.
        """
        # Validate authentication up front so a bad token fails fast.
        login(token=hf_token, add_to_git_credential=False)

        # Initialize client with production endpoint.
        self.client = InferenceClient(
            model="meta-llama/Meta-Llama-3-70B-Instruct",
            token=hf_token,
            timeout=45
        )

    def __call__(self, question: str) -> str:
        """Answer one GAIA question; on failure return an error string."""
        prompt = f"GAIA Question: {question}\nAnswer:"
        try:
            completion = self.client.text_generation(
                prompt=prompt,
                temperature=0.3,
                max_new_tokens=512
            )
            # Keep only the text following the final "Answer:" marker.
            return completion.split("Answer:")[-1].strip()
        except Exception as e:
            # Best-effort contract: surface the failure as text instead of
            # raising, so the submission loop keeps running.
            return f"Model Error: {str(e)}"
41
 
42
  def run_and_submit_all( profile: gr.OAuthProfile | None):
43
  """