import streamlit as st
from transformers import pipeline
from gtts import gTTS
import os

# Page config must be the first Streamlit command in the script.
st.set_page_config(page_title="AI Learning Buddy", page_icon="🧸")

# Load a public, PyTorch-compatible, conversational model.
# flan-alpaca-base is a Flan-T5 (encoder-decoder) model, so it runs under the
# text2text-generation task rather than text-generation.
@st.cache_resource  # cache the pipeline so the model isn't reloaded on every rerun
def load_llm():
    return pipeline("text2text-generation",
                    model="declare-lab/flan-alpaca-base",
                    tokenizer="declare-lab/flan-alpaca-base",
                    max_new_tokens=100,
                    do_sample=True,
                    temperature=0.7)

llm = load_llm()

# Text-to-speech: synthesize the reply with gTTS and play it in the app.
def speak(text, filename="response.mp3"):
    tts = gTTS(text)
    tts.save(filename)
    # Close the file before deleting it so removal works on all platforms.
    with open(filename, "rb") as audio_file:
        st.audio(audio_file.read(), format="audio/mp3")
    os.remove(filename)

# Streamlit UI
st.title("🧸 AI Learning Buddy (Ages 4–7)")
st.markdown("Ask anything fun or educational and hear your buddy talk!")

user_input = st.text_input("What would you like to ask?")
if st.button("Ask the Buddy") and user_input:
    prompt = f"Explain to a 5-year-old: {user_input}"
    result = llm(prompt)[0]["generated_text"]
    # The text2text pipeline returns only the model's reply, not the prompt.
    answer = result.strip()
    st.markdown(f"**AI Buddy says:** {answer}")
    speak(answer)
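
# Minimal usage sketch (the filename app.py is an assumption, not stated above):
#   pip install streamlit transformers torch gtts
#   streamlit run app.py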