# AI Learning Buddy — a Streamlit app that answers kids' questions and reads the answer aloud.
import streamlit as st
from transformers import pipeline
from gtts import gTTS
import os
# Load a public, PyTorch-compatible, conversational model
# Cache the pipeline as a resource so the model is loaded once per session,
# not on every Streamlit rerun.
@st.cache_resource(show_spinner="Loading AI Buddy...")
def load_llm():
    """Build and return the text-generation pipeline backing the AI Buddy.

    Uses the public, PyTorch-compatible flan-alpaca-base model with
    sampling enabled (temperature 0.7) for varied, kid-friendly answers
    capped at 100 new tokens.
    """
    model_id = "declare-lab/flan-alpaca-base"
    return pipeline(
        "text-generation",
        model=model_id,
        tokenizer=model_id,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
    )


llm = load_llm()
# Text-to-speech function
# Text-to-speech function
def speak(text, filename="response.mp3"):
    """Synthesize *text* to speech with gTTS and play it in the app.

    The audio is written to a temporary MP3 file, streamed into an
    ``st.audio`` widget, and the file is removed afterwards.

    Args:
        text: Text to speak aloud.
        filename: Path of the temporary MP3 file (default "response.mp3").
    """
    tts = gTTS(text)
    tts.save(filename)
    try:
        # Context manager closes the handle before os.remove(); the
        # original left the file open, leaking the handle and making
        # removal fail on Windows.
        with open(filename, "rb") as audio_file:
            st.audio(audio_file.read(), format="audio/mp3")
    finally:
        # Remove the temp file even if st.audio raises.
        os.remove(filename)
# Streamlit UI
def _extract_answer(generated_text, prompt):
    """Return the model's answer with any echoed prompt prefix removed.

    Some text-generation pipelines prepend the prompt to the output; strip
    that echo explicitly. This replaces the old ``split(":")[-1]`` hack,
    which also truncated any legitimate answer containing a colon.
    """
    return generated_text.removeprefix(prompt).strip()


# Streamlit UI
st.set_page_config(page_title="AI Learning Buddy", page_icon="🧸")
st.title("🧸 AI Learning Buddy (Ages 4–7)")
st.markdown("Ask anything fun or educational and hear your buddy talk!")
user_input = st.text_input("What would you like to ask?")
if st.button("Ask the Buddy") and user_input:
    prompt = f"Explain to a 5-year-old: {user_input}"
    result = llm(prompt)[0]["generated_text"]
    answer = _extract_answer(result, prompt)
    st.markdown(f"**AI Buddy says:** {answer}")
    speak(answer)