Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -13,7 +13,7 @@ print(dataset)
|
|
13 |
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
|
14 |
|
15 |
def tokenize_function(examples):
|
16 |
-
return tokenizer(examples["abstract"], padding="max_length", truncation=True)
|
17 |
|
18 |
dataset = dataset.map(tokenize_function, batched=True)
|
19 |
|
@@ -23,7 +23,7 @@ model = AutoModelForSequenceClassification.from_pretrained("allenai/scibert_sciv
|
|
23 |
# Schritt 4: Trainingsparameter setzen
|
24 |
training_args = TrainingArguments(
|
25 |
output_dir="./results",
|
26 |
-
|
27 |
per_device_train_batch_size=8,
|
28 |
per_device_eval_batch_size=8,
|
29 |
num_train_epochs=3,
|
@@ -48,7 +48,7 @@ tokenizer.save_pretrained("./trained_model")
|
|
48 |
|
49 |
# Schritt 7: Modell für Gradio bereitstellen
|
50 |
def predict(text):
|
51 |
-
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length", max_length=
|
52 |
with torch.no_grad():
|
53 |
outputs = model(**inputs)
|
54 |
logits = outputs.logits
|
|
|
13 |
tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
|
14 |
|
15 |
def tokenize_function(examples):
    """Tokenize each record's "abstract" field for model input.

    Pads every sequence to the fixed maximum length and truncates longer
    ones, so all examples share a uniform length of 151 tokens.

    Args:
        examples: A batch mapping (as supplied by ``dataset.map(..., batched=True)``)
            containing an "abstract" key with the raw text.

    Returns:
        The tokenizer's encoding dict (input_ids, attention_mask, ...).
    """
    abstracts = examples["abstract"]
    return tokenizer(
        abstracts,
        truncation=True,
        padding="max_length",
        max_length=151,
    )
|
17 |
|
18 |
dataset = dataset.map(tokenize_function, batched=True)
|
19 |
|
|
|
23 |
# Schritt 4: Trainingsparameter setzen
|
24 |
training_args = TrainingArguments(
|
25 |
output_dir="./results",
|
26 |
+
eval_strategy="epoch",
|
27 |
per_device_train_batch_size=8,
|
28 |
per_device_eval_batch_size=8,
|
29 |
num_train_epochs=3,
|
|
|
48 |
|
49 |
# Schritt 7: Modell für Gradio bereitstellen
|
50 |
def predict(text):
|
51 |
+
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length", max_length=151)
|
52 |
with torch.no_grad():
|
53 |
outputs = model(**inputs)
|
54 |
logits = outputs.logits
|