import gradio as gr
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from PIL import Image
import time

# Load CPU-optimized model at module import time (one-time cost).
model_id = "OFA-Sys/small-stable-diffusion-v0"  # Smaller model for CPU
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float32  # Force float32 for CPU (half precision is GPU-only)
)

# Use DPMSolver for better CPU performance (converges in fewer steps)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cpu")


def generate_image(text):
    """Generate an image from a text prompt using the CPU pipeline.

    Args:
        text: The user's prompt string (may be empty or whitespace-only).

    Returns:
        A ``(PIL.Image | None, str)`` tuple: the generated RGB image (or
        ``None`` on failure/empty input) and a human-readable status message.
    """
    # Reject empty AND whitespace-only prompts (original check missed "   ").
    if not text or not text.strip():
        return None, "Please enter some text first!"

    start_time = time.time()
    try:
        # inference_mode() skips autograd bookkeeping — faster and leaner on CPU.
        with torch.inference_mode():
            # Generate with reduced steps for faster processing
            image = pipe(
                text,
                num_inference_steps=20,  # Reduced from typical 50 steps
                guidance_scale=7.5
            ).images[0]

        # Gradio's Image component expects RGB; some pipelines emit RGBA/L.
        if image.mode != "RGB":
            image = image.convert("RGB")

        gen_time = time.time() - start_time
        return image, f"Generated in {gen_time:.1f} seconds"

    except Exception as e:
        # UI boundary: surface the error to the user instead of crashing the app.
        return None, f"Error: {str(e)}"


# Create Gradio interface with loading states
with gr.Blocks(title="CPU Poetry to Image") as demo:
    gr.Markdown("# 💖 CPU-Friendly Poetry to Image")
    gr.Markdown("Note: Generation may take 2-5 minutes on CPU")

    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Your Romantic Text",
                placeholder="e.g., 'Your eyes sparkle like stars'",
                lines=3
            )
            generate_btn = gr.Button("Create Magic ✨")

        with gr.Column():
            output_image = gr.Image(label="Your Generated Art")
            time_info = gr.Textbox(label="Generation Time")

    examples = gr.Examples(
        examples=[
            ["A moonlit beach with heart-shaped waves"],
            ["Two roses intertwined with golden light"],
            ["A love letter floating in the clouds"]
        ],
        inputs=[input_text]
    )

    generate_btn.click(
        fn=generate_image,
        inputs=[input_text],
        outputs=[output_image, time_info],
        api_name="generate"
    )

if __name__ == "__main__":
    demo.launch()