fix: Pass tokenizer to calculate_log_probabilities function to resolve error
app.py CHANGED

@@ -18,7 +18,7 @@ def process_input_text(input_text, tokenizer, device):
     input_ids = inputs["input_ids"]
     return inputs, input_ids
 
-def calculate_log_probabilities(model, inputs, input_ids):
+def calculate_log_probabilities(model, tokenizer, inputs, input_ids):
     with torch.no_grad():
         outputs = model(**inputs, labels=input_ids)
         logits = outputs.logits[0, :-1, :]
@@ -54,7 +54,7 @@ def main():
     input_text = "He asked me to prostrate myself before the king, but I rifused."
     inputs, input_ids = process_input_text(input_text, tokenizer, device)
 
-    result = calculate_log_probabilities(model, inputs, input_ids)
+    result = calculate_log_probabilities(model, tokenizer, inputs, input_ids)
 
     words = split_into_words([token for token, _ in result], [logprob for _, logprob in result])
     log_prob_threshold = -5.0
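For context, a minimal sketch of what the updated calculate_log_probabilities might look like after this change. It assumes the function returns (token, log-probability) pairs, as the call in main() (the split_into_words line) suggests; the log_softmax computation and the use of tokenizer.convert_ids_to_tokens are illustrative assumptions, not taken from the actual app.py, but they show why the tokenizer now has to be passed in.

```python
import torch

def calculate_log_probabilities(model, tokenizer, inputs, input_ids):
    # Forward pass without gradients; labels are only needed if the loss is used.
    with torch.no_grad():
        outputs = model(**inputs, labels=input_ids)
    # Logits at positions 0..n-2 predict tokens 1..n-1.
    logits = outputs.logits[0, :-1, :]
    log_probs = torch.log_softmax(logits, dim=-1)
    # Log-probability the model assigned to each actual next token.
    target_ids = input_ids[0, 1:]
    token_log_probs = log_probs[torch.arange(target_ids.shape[0]), target_ids]
    # The tokenizer is needed here to turn ids back into readable token strings,
    # which is the reason it is now an argument of this function.
    tokens = tokenizer.convert_ids_to_tokens(target_ids.tolist())
    return list(zip(tokens, token_log_probs.tolist()))
```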