from transformers import pipeline

# Run on CPU: device=-1 tells the pipeline not to place the model on a GPU.
summarizer = pipeline("summarization", model="t5-base", device=-1)
text = "This is a long text that needs summarization."
# Dynamically adjust max_length based on input length.
input_length = len(text.split())  # Whitespace word count, an approximation of the token count
min_length = 5
# Target roughly 80% of the input length, capped at 50, and keep max_length
# strictly above min_length so very short inputs remain valid.
max_length = max(min(50, int(input_length * 0.8)), min_length + 1)
summary = summarizer(text, max_length=max_length, min_length=min_length, do_sample=False)
print(summary[0]["summary_text"])