Zen0 committed · Commit f1edb98 · verified · Parent(s): 9685f7b

Update tasks/text.py

Files changed (1)
  1. tasks/text.py +38 -3
tasks/text.py CHANGED
@@ -56,9 +56,44 @@ async def evaluate_text(request: TextEvaluationRequest):
     # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
     #--------------------------------------------------------------------------------------------

-    # Make random predictions (placeholder for actual model inference)
-    true_labels = test_dataset["label"]
-    predictions = [random.randint(0, 7) for _ in range(len(true_labels))]
+    #--------------------------------------------------------------------------------------------
+    # Load your model and tokenizer from Hugging Face
+    #--------------------------------------------------------------------------------------------
+
+    model_name = "Zen0/FrugalDisinfoHunter"  # Model identifier from Hugging Face
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+    #--------------------------------------------------------------------------------------------
+    # Load the dataset
+    #--------------------------------------------------------------------------------------------
+
+    # Assuming 'quotaclimat/frugalaichallenge-text-train' is the dataset you are working with
+    dataset = load_dataset("quotaclimat/frugalaichallenge-text-train")
+
+    # Access the test dataset (you can change this if you want to use a different split)
+    test_dataset = dataset['test']  # Assuming you have a 'test' split available
+
+    #--------------------------------------------------------------------------------------------
+    # Tokenize the text data
+    #--------------------------------------------------------------------------------------------
+
+    # Tokenize the test data (the text field contains the quotes)
+    test_texts = test_dataset["text"]  # The field 'text' contains the climate quotes
+
+    inputs = tokenizer(test_texts, padding=True, truncation=True, return_tensors="pt", max_length=512)
+
+    #--------------------------------------------------------------------------------------------
+    # Inference
+    #--------------------------------------------------------------------------------------------
+
+    # Run inference on the dataset using the model
+    with torch.no_grad():  # Disable gradient calculations
+        outputs = model(**inputs)
+        logits = outputs.logits
+
+    # Get predictions from the logits (choose the class with the highest logit)
+    predictions = torch.argmax(logits, dim=-1).cpu().numpy()  # Convert to numpy array for use

     #--------------------------------------------------------------------------------------------
     # YOUR MODEL INFERENCE STOPS HERE
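
The added code calls AutoTokenizer, AutoModelForSequenceClassification, load_dataset, and torch, but the hunk does not touch the import block, so those names are presumably already imported near the top of tasks/text.py. If they are not, a minimal import block along these lines would be needed (a sketch of the assumed surroundings, not part of this commit):

    # Sketch only: imports the inference code above relies on (assumed, not part of this hunk)
    import torch
    from datasets import load_dataset
    from transformers import AutoTokenizer, AutoModelForSequenceClassification

Note also that the hunk removes the baseline's true_labels = test_dataset["label"] assignment; if the unchanged code later in evaluate_text still scores predictions against true_labels, that assignment would need to be kept or reintroduced.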