from transformers import T5Tokenizer, T5ForConditionalGeneration
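# Note (assumption about your environment): T5Tokenizer relies on the SentencePiece
# library; if loading it fails, install the dependency with `pip install sentencepiece`.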
import torch
# Load the T5 tokenizer and model
model_name = "t5-small" # You can use any T5 model available
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
# Example function to use the model
def summarize(text):
    # Tokenize the input text with the "summarize:" task prefix T5 expects
    inputs = tokenizer.encode("summarize: " + text, return_tensors="pt", max_length=512, truncation=True)
    # Generate the summary with beam search
    outputs = model.generate(inputs, max_length=150, min_length=30, length_penalty=2.0, num_beams=4, early_stopping=True)
    summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return summary
# Example usage
text_to_summarize = "Your input text goes here."
print(summarize(text_to_summarize))
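# Optional sketch (adjust to your setup): to run on a GPU when one is available,
# move the model there once after loading and send the tokenized inputs to the
# same device inside summarize() before calling model.generate(), e.g.
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model.to(device)
#   inputs = inputs.to(device)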