Update app.py
app.py CHANGED
@@ -1,79 +1,80 @@
 import streamlit as st
-
-from
-import
-import torch

-#
-
-def load_model():
-    model_name = "vennify/t5-base-grammar-correction"
-    tokenizer = T5Tokenizer.from_pretrained(model_name)
-    model = T5ForConditionalGeneration.from_pretrained(model_name)
-    return tokenizer, model

-

-#
-
-
-
-

-
-
-        clean_word = re.sub(r'[^\w\s]', '', word)
-        if clean_word.isalpha():
-            corrected_word = spell.correction(clean_word.lower()) or clean_word
-            # Restore punctuation
-            trailing = ''.join(re.findall(r'[^\w\s]', word))
-            corrected_words.append(corrected_word + trailing)
-        else:
-            corrected_words.append(word)

-

-#
-
-
-    input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
-    outputs = model.generate(input_ids, max_length=512, num_beams=4, early_stopping=True)
-    corrected = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return corrected

-#
-
-st.
-st.write("Fixes your typos and grammar without changing your meaning.")

-

-
-
-        st.warning("Please enter a sentence.")
-    else:
-        # Step 1: Spelling correction
-        spelling_fixed = correct_spelling(user_input)

-
-

-
-

-
-
-**
-{user_input}

-
-

-
-

-
-
-- Grammar and punctuation (like capitalization) were fixed.
-- Your original message and word choices were preserved.
-""")
+import os
+import time
 import streamlit as st
+import google.generativeai as genai
+from google.auth import default
+from dotenv import load_dotenv

+# ✅ Set page config FIRST
+st.set_page_config(page_title="Grammar & Spelling Assistant", page_icon="🧠")

+# Load environment variables
+load_dotenv()

+# ✅ Use Application Default Credentials
+if os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
+    credentials, _ = default()
+    genai.configure(credentials=credentials)
+else:
+    st.error("❌ GOOGLE_APPLICATION_CREDENTIALS is not set. Please add it to .env or Hugging Face Secrets.")
+    st.stop()

+# Initialize Gemini model
+model = genai.GenerativeModel("gemini-2.0-pro")

+# Optional styling
+st.markdown("""
+<style>
+body, .stApp { background-color: #121212 !important; color: #e0e0e0 !important; }
+.stChatInput { background: #222 !important; border: 1px solid #555 !important; }
+</style>
+""", unsafe_allow_html=True)

+# App header
+st.title("📝 Grammar Guardian")
+st.caption("Correct grammar, get explanations, and improve your writing!")

+# Initialize chat history
+if "history" not in st.session_state:
+    st.session_state.history = []

+# Display chat history
+for message in st.session_state.history:
+    role, content = message["role"], message["content"]
+    with st.chat_message(role):
+        st.markdown(content)

+# Chat input
+prompt = st.chat_input("Type a sentence you'd like to improve...")

+if prompt:
+    # Show user message
+    with st.chat_message("user"):
+        st.markdown(prompt)
+    st.session_state.history.append({"role": "user", "content": prompt})

+    # Process input with Gemini
+    with st.spinner("Analyzing..."):
+        try:
+            full_prompt = f"""
+You are a grammar correction assistant.
+When a user gives a sentence, do two things:
+1. Correct the sentence.
+2. Explain clearly why you corrected it.

+Respond using this format:
+**Correction:** <Corrected sentence>
+**Explanation:** <Explanation of why it was corrected>

+Sentence: {prompt}
+"""
+            response = model.generate_content(full_prompt)
+            result = response.text

+            # Show assistant message
+            with st.chat_message("assistant"):
+                st.markdown(result)
+            st.session_state.history.append({"role": "assistant", "content": result})

+        except Exception as e:
+            st.error(f"Error from Gemini API: {e}")
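For local testing of the credential flow introduced in this change, a minimal standalone sketch follows. It reuses only the calls that appear in the diff (load_dotenv, google.auth.default, genai.configure, GenerativeModel.generate_content); the .env contents, the service-account path, and the test sentence are illustrative assumptions, and the model name is copied from the diff.

# Minimal sketch of the credential + Gemini flow the new app.py relies on.
# Assumes a .env file (illustrative) such as:
#   GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
import os

import google.generativeai as genai
from dotenv import load_dotenv
from google.auth import default

load_dotenv()  # populate os.environ from .env

if not os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
    raise SystemExit("GOOGLE_APPLICATION_CREDENTIALS is not set")

credentials, _ = default()                 # Application Default Credentials
genai.configure(credentials=credentials)   # same configuration path as app.py

model = genai.GenerativeModel("gemini-2.0-pro")  # model name taken from the diff
response = model.generate_content("Correct this sentence: she go to school yesterday.")
print(response.text)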