ksatzke committed on
Commit
256b96b
·
verified ·
1 Parent(s): 50d8dcb

Update compute_model_property.py

Browse files
Files changed (1) hide show
  1. compute_model_property.py +15 -10
compute_model_property.py CHANGED
@@ -16,6 +16,8 @@ import numpy as np
16
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
17
  from evaluate import load
18
 
 
 
19
 
20
  # 1. record each file name included
21
  # 1.1 read different file formats depending on parameters (i.e., filetype)
@@ -267,8 +269,9 @@ def compute_model_card_evaluation_results(tokenizer, model_checkpoint, raw_datas
267
  result = trainer.evaluate()
268
  return result
269
 
270
-
271
- if __name__ == "__main__":
 
272
 
273
  in_container = True
274
  if len(sys.argv) > 1:
@@ -284,16 +287,18 @@ if __name__ == "__main__":
284
 
285
  print(model_checkpoint, dataset_name, metric)
286
 
287
-
288
  model_checkpoint = model_checkpoint
289
  raw_datasets = load_dataset(dataset_name, "mrpc")
290
  metric = load("glue", "mrpc")
291
  tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
292
  output = compute_model_card_evaluation_results(tokenizer, model_checkpoint, raw_datasets, metric)
293
- print(json.dumps(output))
294
-
295
- if in_container:
296
- with open("/tmp/outputs/computation_result.json", "w") as f:
297
- json.dump(output, f, indent=4, sort_keys=True)
298
- else:
299
- print(json.dumps(output, indent=4, sort_keys=True))
 
 
 
 
16
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
17
  from evaluate import load
18
 
19
+ from fastapi import FastAPI
20
+ app = FastAPI()
21
 
22
  # 1. record each file name included
23
  # 1.1 read different file formats depending on parameters (i.e., filetype)
 
269
  result = trainer.evaluate()
270
  return result
271
 
272
+ #if __name__ == "__main__":
273
+ @app.get("/")
274
+ def return_output():
275
 
276
  in_container = True
277
  if len(sys.argv) > 1:
 
287
 
288
  print(model_checkpoint, dataset_name, metric)
289
 
 
290
  model_checkpoint = model_checkpoint
291
  raw_datasets = load_dataset(dataset_name, "mrpc")
292
  metric = load("glue", "mrpc")
293
  tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
294
  output = compute_model_card_evaluation_results(tokenizer, model_checkpoint, raw_datasets, metric)
295
+
296
+ #print(json.dumps(output))
297
+
298
+ #if in_container:
299
+ # with open("/tmp/outputs/computation_result.json", "w") as f:
300
+ # json.dump(output, f, indent=4, sort_keys=True)
301
+ #else:
302
+ # print(json.dumps(output, indent=4, sort_keys=True))
303
+
304
+ return output