import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Load grammar correction model (vennify/t5-base-grammar-correction)
model_name = "vennify/t5-base-grammar-correction"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# Load explanation model (google/flan-t5-large)
explain_model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
explain_tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
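# Note (sketch, not part of the original app): module-level code reruns on every
# Streamlit interaction, so the loads above can be wrapped in a cached factory to
# avoid re-instantiating the models each time, e.g.:
#
#     @st.cache_resource
#     def load_grammar_pipeline():
#         tok = AutoTokenizer.from_pretrained(model_name)
#         mdl = AutoModelForSeq2SeqLM.from_pretrained(model_name)
#         return pipeline("text2text-generation", model=mdl, tokenizer=tok)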
def correct_text(text):
    # The vennify/t5-base-grammar-correction model card prepends "grammar: " to its inputs
    input_text = "grammar: " + text
    result = pipe(input_text, max_length=512, clean_up_tokenization_spaces=True)
    return result[0]['generated_text']
def explain_corrections(original, corrected):
    # Ask flan-t5 to describe the differences between the original and corrected text
    prompt = f"""Original: {original}
Corrected: {corrected}
Explain the changes made, identify grammar or spelling issues, and give writing improvement tips."""
    inputs = explain_tokenizer(prompt, return_tensors="pt", truncation=True)
    outputs = explain_model.generate(**inputs, max_length=512)
    return explain_tokenizer.decode(outputs[0], skip_special_tokens=True)
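# Note (sketch, not part of the original app): truncation=True cuts the prompt at the
# tokenizer's model_max_length (512 tokens for T5 checkpoints), so very long essays are
# silently shortened. One workaround is to explain each paragraph separately, e.g.:
#
#     explanations = [explain_corrections(o, c)
#                     for o, c in zip(original.split("\n\n"), corrected.split("\n\n"))]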
# Streamlit UI
st.title("✍️ English Writing Assistant")

user_input = st.text_area("Enter your sentence, paragraph, or essay:")

if st.button("Check & Improve"):
    if user_input.strip() == "":
        st.warning("Please enter some text.")
    else:
        corrected = correct_text(user_input)
        explanation = explain_corrections(user_input, corrected)

        st.subheader("✅ Corrected Text:")
        st.write(corrected)

        st.subheader("📝 Explanation & Suggestions:")
        st.write(explanation)
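# To run locally (assuming this file is saved as app.py): install streamlit,
# transformers, torch and sentencepiece, then launch with `streamlit run app.py`.
# On a Hugging Face Space, the same packages would go into requirements.txt.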