from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the T5 tokenizer and model (t5-small here; any T5 checkpoint works)
model_name = "t5-small"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
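
# If a GPU is available, the model and inputs can optionally be moved there.
# A sketch, assuming standard PyTorch device handling (not in the original):
#   import torch
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model.to(device)            # move the weights once, at load time
#   inputs = inputs.to(device)  # move each tokenized batch inside summarize()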

def summarize(text):
    # Tokenize the input with T5's "summarize:" task prefix,
    # truncating to T5's 512-token context window
    inputs = tokenizer.encode(
        "summarize: " + text, return_tensors="pt", max_length=512, truncation=True
    )
    # Beam-search generation: 4 beams, summary capped at 30-150 tokens;
    # length_penalty > 1 favors longer summaries
    outputs = model.generate(
        inputs, max_length=150, min_length=30,
        length_penalty=2.0, num_beams=4, early_stopping=True,
    )
    # Decode the best beam back to a string, dropping special tokens
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Example usage when run as a script
if __name__ == "__main__":
    text_to_summarize = "Your input text goes here."
    print(summarize(text_to_summarize))
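
# Since this file is app.py on a Hugging Face Space, it would typically expose
# a small UI. A minimal sketch using Gradio (an assumption; the original shows
# no UI code, though gr.Interface and launch() are standard Gradio calls):
#   import gradio as gr
#   demo = gr.Interface(fn=summarize, inputs="text", outputs="text",
#                       title="T5 Summarizer")
#   demo.launch()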