import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the paraphrasing model and tokenizer (cached so they are not reloaded on every rerun)
@st.cache_resource
def load_model():
    # Using a T5-based paraphrasing model available on Hugging Face
    model_name = "Vamsi/T5_Paraphrase_Paws"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return model, tokenizer

st.title("📝 Paraphrasing Tool")
st.write("Enter text below to receive a paraphrased version using AI.")

# Text input area
input_text = st.text_area("Input Text")

# Paraphrase button
if st.button("Paraphrase"):
    if input_text.strip():
        model, tokenizer = load_model()

        # T5-based paraphrasers expect the input prefixed with a task prompt;
        # the tokenizer appends the end-of-sequence token automatically.
        text = "paraphrase: " + input_text
        encoding = tokenizer(
            text,
            padding="max_length",
            max_length=256,
            truncation=True,
            return_tensors="pt",
        )
        input_ids = encoding["input_ids"]
        attention_mask = encoding["attention_mask"]

        # Generate the paraphrased output. Combining num_beams with
        # do_sample=True selects beam-sample decoding, which adds some
        # variety to the beam search results.
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            max_length=256,
            num_beams=5,
            num_return_sequences=1,
            do_sample=True,
        )
        paraphrased_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        st.subheader("Paraphrased Text:")
        st.write(paraphrased_text)
    else:
        st.warning("Please enter some text to paraphrase.")
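
# Usage: save this script (the filename app.py below is an assumption) and launch it with
#     streamlit run app.py
#
# Optional variant, as a sketch rather than part of the app above: to surface
# several candidate paraphrases instead of one, raise num_return_sequences
# (it must not exceed num_beams) and decode each returned sequence, e.g.:
#
#     outputs = model.generate(
#         input_ids=input_ids,
#         attention_mask=attention_mask,
#         max_length=256,
#         num_beams=5,
#         num_return_sequences=3,  # at most num_beams candidates
#         do_sample=True,
#     )
#     for i, seq in enumerate(outputs, start=1):
#         st.write(f"{i}. {tokenizer.decode(seq, skip_special_tokens=True)}")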