"""Gradio app: generate a short educational story plus an illustration for a topic."""

import logging

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from transformers import pipeline

logger = logging.getLogger(__name__)

# --- 1. Load AI models ----------------------------------------------------
# BUG FIX: the original loaded "OpenAssistant/reward-model-deberta-v3-large",
# which is a sequence-classification *reward* model and cannot back a
# "text-generation" pipeline.  Use an instruction-tuned causal LM instead.
STORY_MODEL = "Qwen/Qwen2.5-0.5B-Instruct"
IMAGE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"

# Guard against CPU-only machines instead of crashing on .to("cuda");
# float16 is only safe on GPU.
_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
_DTYPE = torch.float16 if _DEVICE == "cuda" else torch.float32

story_gen = pipeline("text-generation", model=STORY_MODEL)
image_pipe = StableDiffusionPipeline.from_pretrained(
    IMAGE_MODEL, torch_dtype=_DTYPE
).to(_DEVICE)


def generate(topic):
    """Generate a ~100-word educational story and an illustration for *topic*.

    Returns:
        (story_text, PIL.Image) on success, or (error_message, None) when the
        input is blank or generation fails.
    """
    if not topic.strip():  # Check if input is empty
        return "Please enter a valid topic (e.g., 'Ancient Rome' or 'Renewable Energy').", None
    try:
        # System prompt steering tone, accuracy, and safety of the story.
        system_prompt = (
            "You are an expert educational assistant. Your task is to create a 100-word story that is: "
            "1. Engaging and age-appropriate for teenagers. "
            "2. Factually accurate and aligned with the topic. "
            "3. Free of inappropriate or harmful content. "
            "4. Written in a clear and simple style. "
            f"Write a story about: {topic}."
        )
        # max_new_tokens (not max_length) so the long prompt does not eat the
        # generation budget; return_full_text=False strips the prompt echo
        # from the returned text.
        story = story_gen(
            system_prompt, max_new_tokens=200, return_full_text=False
        )[0]["generated_text"]
        # Use the short topic rather than the whole story: Stable Diffusion's
        # CLIP text encoder truncates prompts at 77 tokens, so a full story
        # would be silently cut off.
        image_prompt = f"Educational illustration about: {topic}"
        image = image_pipe(image_prompt).images[0]
        return story, image
    except Exception:
        # Log the real error for the operator; show a friendly message to the user.
        logger.exception("Story/image generation failed for topic %r", topic)
        return "Oops! Something went wrong. Please try again with a different topic.", None


# --- 2. Build the web interface -------------------------------------------
with gr.Blocks() as app:
    gr.Markdown("# 📖 AI Story Generator for Schools")
    with gr.Row():
        topic_input = gr.Textbox(label="Enter a Topic (e.g., 'Leonardo da Vinci' or 'Python Coding')")
        generate_btn = gr.Button("Generate")
    with gr.Row():
        story_output = gr.Textbox(label="Generated Story")
        image_output = gr.Image(label="Illustration")
    # Trigger on Enter or button click — the original used .change, which
    # launched an expensive model invocation on every single keystroke.
    topic_input.submit(generate, inputs=topic_input, outputs=[story_output, image_output])
    generate_btn.click(generate, inputs=topic_input, outputs=[story_output, image_output])

if __name__ == "__main__":
    app.launch()