waniberry66's picture
update app.py
594c279 verified
import os
import subprocess
import sys

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Install flash-attn as before (even though it is not used directly).
# FIX: merge the skip flag into the *existing* environment — the original passed
# env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, which replaces the whole
# environment and strips PATH/HOME, so pip could not run correctly.
# Also use an argument list with the current interpreter instead of shell=True.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "flash-attn", "--no-build-isolation"],
    env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
)
# Pick the compute device and a matching dtype: half precision on GPU,
# full precision on CPU (float16 is poorly supported on CPU).
if torch.cuda.is_available():
    device = "cuda"
    torch_dtype = torch.float16
else:
    device = "cpu"
    torch_dtype = torch.float32

# Load the text-to-image model and move it onto the selected device.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch_dtype,
).to(device)
# optional: remove safety checker for faster performance
pipe.safety_checker = None
def generate_image(prompt):
    """Run the Stable Diffusion pipeline on *prompt* and return the first image."""
    output = pipe(prompt)
    return output.images[0]
# Wire the generator into a minimal Gradio UI: one prompt box in, one image out.
prompt_input = gr.Textbox(label="Enter your prompt")
image_output = gr.Image(label="Generated Image")

io = gr.Interface(
    fn=generate_image,
    inputs=[prompt_input],
    outputs=[image_output],
    theme="Yntec/HaleyCH_Theme_Orange",
    description="⚠ Sorry for the inconvenience. The space is currently running on the CPU, which might affect performance. We appreciate your understanding.",
)
io.launch(debug=True)