import streamlit as st
from transformers import pipeline
from gtts import gTTS
import os
# Load small public instruction model
@st.cache_resource
def load_llm():
    """Build the FLAN-T5-small text2text-generation pipeline.

    Streamlit re-runs this script on every widget interaction; without
    caching, the model would be re-initialized (and possibly re-downloaded)
    on each button click. ``st.cache_resource`` keeps one pipeline instance
    alive for the whole server session.

    Returns:
        A ``transformers`` pipeline for "text2text-generation".
    """
    return pipeline(
        "text2text-generation",
        model="google/flan-t5-small",
        tokenizer="google/flan-t5-small",
    )


llm = load_llm()
# Text-to-speech | |
def speak(text, filename="response.mp3"):
    """Synthesize *text* with gTTS and play it in the Streamlit app.

    The audio is written to a temporary MP3 file, streamed into an
    ``st.audio`` player, and the file is always deleted afterwards.

    Args:
        text: The text to vocalize.
        filename: Path of the temporary MP3 file (created then removed).
    """
    tts = gTTS(text)
    tts.save(filename)
    try:
        # Context manager closes the handle (the original leaked it).
        with open(filename, "rb") as audio_file:
            st.audio(audio_file.read(), format="audio/mp3")
    finally:
        # Clean up the temp file even if playback raises.
        os.remove(filename)
# Streamlit UI: page chrome, a question box, and a button that triggers
# the model and reads the answer aloud.
st.set_page_config(page_title="AI Learning Buddy", page_icon="🧸")
st.title("🧸 AI Learning Buddy (Ages 4–7)")
st.markdown("Type a fun question and hear the AI Buddy reply!")

user_input = st.text_input("Ask your question:")
ask_clicked = st.button("Ask the Buddy")

if ask_clicked and user_input:
    # Frame the question so the model answers at a young child's level.
    prompt = f"Explain to a 5-year-old: {user_input}"
    answer = llm(prompt)[0]["generated_text"]
    st.markdown(f"**AI Buddy says:** {answer}")
    speak(answer)