"""Streamlit chatbot app for menstrual-health awareness.

Loads a seq2seq model from the Hugging Face Hub
("adi2606/Menstrual-Health-Awareness-Chatbot") and answers free-text
questions about periods, PMS, hygiene, and related topics.
"""

import base64

import streamlit as st
import torch  # backend for transformers inference; also used for no_grad below
from PIL import Image
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

st.set_page_config(page_title="Menstrual Health Chatbot 💬", layout="centered")


# Load model
@st.cache_resource  # cache across Streamlit reruns so the model loads only once
def load_model():
    """Load the tokenizer and seq2seq model from the Hub; run on CPU.

    Returns:
        tuple: (tokenizer, model) ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        "adi2606/Menstrual-Health-Awareness-Chatbot"
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        "adi2606/Menstrual-Health-Awareness-Chatbot"
    ).to("cpu")
    return tokenizer, model


tokenizer, model = load_model()


# Generate response
def generate_response(input_text: str) -> str:
    """Generate a chatbot answer for *input_text*.

    Output is capped at 128 tokens by ``max_length``.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Inference only — no_grad avoids building the autograd graph.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Background image
def set_background(image_path: str) -> None:
    """Inline the image at *image_path* as a base64 CSS page background."""
    with open(image_path, "rb") as f:
        encoded_string = base64.b64encode(f.read()).decode()
    # NOTE(review): the original CSS payload was lost when the source was
    # extracted; this restores the conventional full-page background rule.
    st.markdown(
        f"""
        <style>
        .stApp {{
            background-image: url("data:image/jpeg;base64,{encoded_string}");
            background-size: cover;
            background-position: center;
        }}
        </style>
        """,
        unsafe_allow_html=True,
    )


set_background("background.jpg")

banner = Image.open("banner.png")
st.image(banner, width=100)

# App content
# NOTE(review): the HTML wrappers below were stripped from the original
# source; only the inner text survived. Reconstructed as centered headings.
st.markdown(
    "<h1 style='text-align: center;'>"
    "🩸 Menstrual Health Awareness Chatbot 💬"
    "</h1>",
    unsafe_allow_html=True,
)
st.markdown(
    "<p style='text-align: center;'>"
    "Ask anything about periods, PMS, hygiene, and more!"
    "</p>",
    unsafe_allow_html=True,
)

st.markdown("### 🤔 Your Question")
user_input = st.text_input("", placeholder="E.g., What are the symptoms of PMS?")

if st.button("🚀 Ask"):
    if user_input.strip():
        with st.spinner("Generating a helpful response..."):
            response = generate_response(user_input)
        st.success("✅ Here's what I found:")
        st.markdown(f"**💬 Chatbot:** {response}")
    else:
        st.warning("⚠️ Please enter a question to get started.")

st.markdown("---")
st.markdown("Made with ❤️ to spread awareness.", unsafe_allow_html=True)