meg (HF Staff) committed
Commit c83b217 · verified · 1 Parent(s): 92305c2

Update entrypoint.sh

Files changed (1):
  1. entrypoint.sh +9 -10
entrypoint.sh CHANGED
@@ -2,30 +2,29 @@
 
 config_dir="/optimum-benchmark/examples/energy_star/"
 
-# This script is meant to be called from a python script \
-# that provides the REPO_ID as the first argument.
+# This script is meant to be called from a python script (app.py) \
+# that provides the REPO_ID as the first argument, etc.
 REPO_ID=$1
 MODEL=$2
 TASK=$3
 HARDWARE=$4
+RUN_DIR=$5
+
 echo "Attempting to run."
 # Read the name of the model and the experiment.
 echo "Benchmarking Model: ${MODEL}, Task: ${TASK}, Hardware: ${HARDWARE}"
 
 # Initialize the directory for output.
-now=$(date +%Y-%m-%d-%H-%M-%S)
-run_dir="/app/runs/${TASK}/${MODEL}/${HARDWARE}/${now}"
-mkdir -p "$run_dir"
+mkdir -p "${RUN_DIR}"
 # Save the task/model run directory to text file, for tracking purposes.
-echo "${run_dir}" >> attempts.txt
+echo "${RUN_DIR}" >> attempts.txt
 
 { # try
 # Let the benchmarking begin!
-optimum-benchmark --config-name "${TASK}" --config-dir="${config_dir}" backend.model="${MODEL}" backend.processor="${MODEL}" hydra.run.dir="${run_dir}" 2> "${run_dir}/error.log"
+optimum-benchmark --config-name "${TASK}" --config-dir="${config_dir}" backend.model="${MODEL}" backend.processor="${MODEL}" hydra.run.dir="${RUN_DIR}" 2> "${RUN_DIR}/error.log"
 } || { # catch
 echo "Did not benchmark."
-echo "${run_dir}" >> failed_attempts.txt
+echo "${RUN_DIR}" >> failed_attempts.txt
 }
 
-echo "Finished"# updating requests dataset and results dataset."
-#python /process_runs.py
+echo "Finished"
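
After this change the run directory is no longer computed inside entrypoint.sh; the caller builds it and passes it as the fifth positional argument. A minimal sketch of how a Python caller such as app.py might do this is below. The script path, directory layout, and the launch_benchmark helper are assumptions for illustration only and are not taken from this commit.

import subprocess
from datetime import datetime

def launch_benchmark(repo_id: str, model: str, task: str, hardware: str) -> int:
    # Build the run directory on the Python side (previously done inside entrypoint.sh).
    now = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    run_dir = f"/app/runs/{task}/{model}/{hardware}/{now}"  # assumed layout

    # Positional arguments map to $1..$5 in entrypoint.sh:
    # REPO_ID, MODEL, TASK, HARDWARE, RUN_DIR.
    result = subprocess.run(
        ["bash", "/entrypoint.sh", repo_id, model, task, hardware, run_dir]  # assumed path
    )
    return result.returncode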