jayebaku committed on
Commit
0232ca4
·
verified ·
1 Parent(s): 4354e0b

Update classifier.py

Browse files
Files changed (1) hide show
  1. classifier.py +7 -7
classifier.py CHANGED
@@ -7,16 +7,16 @@ def classify(tweet, event_model, hftoken, threshold):
7
  results = {"text": None, "event": None, "score": None}
8
 
9
  # event type prediction with transformers pipeline
10
- # event_predictor = tpipeline(task="text-classification", model=event_model,
11
- # batch_size=512, token=hftoken, device="cpu")
12
- # tokenizer_kwargs = {'padding': True, 'truncation': True, 'max_length': 512}
13
- # prediction = event_predictor(tweet, **tokenizer_kwargs)[0]
14
 
15
 
16
  # with onnx pipeline
17
- onnx_classifier = opipeline("text-classification", model=event_model, accelerator="ort",
18
- batch_size=512, token=hftoken, device="cpu")
19
- prediction = onnx_classifier(tweet)[0]
20
 
21
 
22
  results["text"] = tweet
 
7
  results = {"text": None, "event": None, "score": None}
8
 
9
  # event type prediction with transformers pipeline
10
+ event_predictor = tpipeline(task="text-classification", model=event_model,
11
+ batch_size=512, token=hftoken, device="cpu")
12
+ tokenizer_kwargs = {'padding': True, 'truncation': True, 'max_length': 512}
13
+ prediction = event_predictor(tweet, **tokenizer_kwargs)[0]
14
 
15
 
16
  # with onnx pipeline
17
+ # onnx_classifier = opipeline("text-classification", model=event_model, accelerator="ort",
18
+ # batch_size=512, token=hftoken, device="cpu")
19
+ # prediction = onnx_classifier(tweet)[0]
20
 
21
 
22
  results["text"] = tweet