# NOTE(review): the following header was captured from the hosting page when
# this file was scraped, not written by the author — kept here as a comment:
#   Spaces: Runtime error | File size: 2,684 Bytes | revision ac7bed8 / 3e1e198
# %% [markdown]
# # 🖼️ Tiny Stable Diffusion (CPU Version)
# **0.9GB Model | No GPU Required**
# %% [markdown]
# ## 1. Install Requirements
# FIX: a bare ``pip install torch`` line is a SyntaxError in a Python file.
# Install dependencies from a shell or a notebook magic cell instead:
#   %pip install torch diffusers huggingface_hub pillow gradio
import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import snapshot_download
from PIL import Image
import gradio as gr
import os

# Force CPU mode.
# NOTE(review): 'qnnpack' is the ARM quantized-op backend — presumably this
# targets an ARM host; on x86 builds without qnnpack this assignment can raise.
torch.backends.quantized.engine = 'qnnpack'  # ARM optimization
device = torch.device("cpu")
# %% [markdown]
# ## 2. Download Model (0.9GB)
model_path = "./tiny_model"
os.makedirs(model_path, exist_ok=True)

# Pull only the lightweight weight files: skip pickle (*.bin), half-precision
# and ONNX variants so the download stays around 0.9 GB.
print("Downloading model... (this may take a few minutes)")
snapshot_download(
    repo_id="nota-ai/bk-sdm-tiny",
    local_dir=model_path,
    ignore_patterns=["*.bin", "*.fp16*", "*.onnx"],
    local_dir_use_symlinks=False,
)

# Sanity check: the target directory must now contain at least one entry.
if os.listdir(model_path):
    print("✔ Model downloaded successfully")
else:
    raise ValueError("Model failed to download! Check internet connection")
# %% [markdown]
# ## 3. Load Optimized Pipeline
print("Loading model...")

# Build the pipeline in fp32 (CPU has no fp16 math path) and disable the
# safety checker to save memory; then move it onto the CPU device.
_pipeline = StableDiffusionPipeline.from_pretrained(
    model_path,
    torch_dtype=torch.float32,
    safety_checker=None,
    requires_safety_checker=False,
)
pipe = _pipeline.to(device)

# Memory optimizations: sliced attention lowers peak RAM, and compiling the
# UNet speeds up repeated denoising calls.
pipe.enable_attention_slicing()
pipe.unet = torch.compile(pipe.unet)  # Compile for faster inference
# %% [markdown]
# ## 4. Generation Function
def generate_image(prompt, steps=15, seed=42):
    """Generate one 256x256 image from *prompt* using the global ``pipe``.

    Args:
        prompt: Text description of the desired image.
        steps: Number of denoising steps (more = slower but sharper).
        seed: RNG seed for reproducible output.

    Returns:
        The generated ``PIL.Image.Image``.
    """
    # FIX: Gradio's Number/Slider widgets deliver floats; torch's
    # manual_seed and the scheduler's step count both require ints,
    # so a click with seed=42.0 would raise a TypeError without these casts.
    steps = int(steps)
    seed = int(seed)
    generator = torch.Generator(device).manual_seed(seed)
    print(f"Generating: {prompt}")
    image = pipe(
        prompt,
        num_inference_steps=steps,
        guidance_scale=7.0,
        generator=generator,
        width=256,
        height=256,
    ).images[0]
    return image
# %% [markdown]
# ## 5. Gradio Interface
# Simple single-column layout: prompt box, sampling controls, generate
# button, and a fixed-size output image wired to ``generate_image``.
with gr.Blocks(title="Tiny Diffusion (CPU)", css="footer {visibility: hidden}") as demo:
    gr.Markdown("## 🎨 CPU Image Generator (0.9GB Model)")
    with gr.Row():
        prompt = gr.Textbox(
            label="Prompt",
            value="a cute robot wearing a hat",
            placeholder="Describe your image...",
        )
    with gr.Row():
        steps = gr.Slider(5, 25, value=15, label="Steps")
        seed = gr.Number(42, label="Seed")
    with gr.Row():
        generate_btn = gr.Button("Generate", variant="primary")
    with gr.Row():
        output = gr.Image(label="Output", width=256, height=256)
    generate_btn.click(fn=generate_image, inputs=[prompt, steps, seed], outputs=output)
# %% [markdown]
# ## 6. Launch App
print("Starting interface...")
# Bind to all interfaces so the app is reachable from inside a
# container / HF Space; 7860 is Gradio's conventional port.
# FIX: removed the stray " |" scraping artifact that followed the closing
# parenthesis in the original (it made the line a SyntaxError).
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
)