File size: 565 Bytes
17c487a
7507a36
524f780
17c487a
 
 
 
e116825
0116c46
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
import torch
from transformers import pipeline

# Summarize a short text with t5-base on CPU, sizing max_length from the input.
# device=-1 is the pipeline's way of requesting CPU (no CUDA device index);
# the previous `device = torch.device("cpu")` variable was never passed to the
# pipeline and has been removed as dead code.
summarizer = pipeline("summarization", model="t5-base", device=-1)

text = "This is a long text that needs summarization."

MIN_LENGTH = 5  # shortest summary (in tokens) we will accept

# Dynamically adjust max_length based on input length.
# NOTE: len(text.split()) is only an approximation of the tokenizer's
# token count — good enough for a rough cap, not exact.
input_length = len(text.split())

# Target ~80% of the input word count, capped at 50 tokens.
# Clamp the lower bound to MIN_LENGTH + 1: the unclamped form
# min(50, int(input_length * 0.8)) drops below min_length (even to 0)
# for very short inputs, which the generator rejects (max_length must
# exceed min_length).
max_length = min(50, max(MIN_LENGTH + 1, int(input_length * 0.8)))

summary = summarizer(text, max_length=max_length, min_length=MIN_LENGTH, do_sample=False)
print(summary[0]["summary_text"])