apsys committed on
Commit a7b55ff · 1 Parent(s): cdd1956

added restart

Files changed (1)
  src/submission/submit.py  +24 -1
src/submission/submit.py CHANGED
@@ -8,6 +8,8 @@ import tempfile
 from datetime import datetime
 from typing import Dict, List, Tuple
 import shutil
+import threading
+import time
 
 from huggingface_hub import HfApi
 from datasets import load_dataset
@@ -99,6 +101,24 @@ def submit_leaderboard_to_hub(entries: List[Dict], version="v0") -> Tuple[bool,
         return False, f"Error updating leaderboard: {e}"
 
 
+def restart_space_after_delay(delay_seconds: int = 2) -> None:
+    """
+    Restart the Hugging Face Space after a delay.
+    """
+    def _restart_space():
+        time.sleep(delay_seconds)
+        try:
+            api = HfApi(token=TOKEN)
+            api.restart_space(repo_id=REPO_ID)
+        except Exception as e:
+            print(f"Error restarting space: {e}")
+
+    # Start the restart in a separate thread
+    thread = threading.Thread(target=_restart_space)
+    thread.daemon = True
+    thread.start()
+
+
 def process_submission(file_path: str, metadata: Dict, version="v0") -> str:
     """
     Process a submission to the GuardBench leaderboard.
@@ -212,7 +232,10 @@ def process_submission(file_path: str, metadata: Dict, version="v0") -> str:
         success, message = submit_leaderboard_to_hub(all_entries, version)
         if not success:
             return styled_error(message)
-        return styled_message(f"Submission successful! Model evaluated and leaderboard updated.")
+
+        restart_space_after_delay(5)
+
+        return styled_message("Submission successful! Model evaluated and leaderboard updated.")
 
     except Exception as eval_error:
         return styled_error(f"Error during evaluation: {eval_error}")