# Zero123Plus CPU-only Flask API (Hugging Face Space).
# NOTE(review): the original "Spaces: / Runtime error" lines were platform
# status-banner residue captured by the paste, not source code.
import base64
import io
import logging

import torch
from flask import Flask, request, jsonify, send_file
from diffusers import DiffusionPipeline
from PIL import Image

# Module-level logger (PEP 282 convention) and the WSGI application object.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)
# Load the model once at startup (on CPU). If loading fails, `pipe` is left
# as None so request handlers can return an HTTP 500 instead of the whole
# process crashing at import time.
try:
    logger.info("Loading Zero123Plus pipeline...")
    pipe = DiffusionPipeline.from_pretrained(
        "sudo-ai/zero123plus-v1.2",
        torch_dtype=torch.float32,  # CPU inference needs float32 (no half-precision CPU kernels)
    )
    pipe.to("cpu")
    logger.info("=== Application Startup at CPU mode =====")
except Exception as e:
    logger.error(f"Error loading model: {e}")
    pipe = None
def pil_to_base64(image):
    """Encode an image as a base64 PNG string (without a data-URI prefix).

    Args:
        image: Any object exposing PIL's ``save(fp, format=...)`` interface.

    Returns:
        str: Base64-encoded PNG bytes, decoded to ASCII text.
    """
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
@app.route("/")  # NOTE(review): decorator restored — without it Flask never serves this view
def home():
    """Health-check endpoint confirming the service is up."""
    return "Zero123Plus CPU API is running!"
@app.route("/generate", methods=["POST"])  # NOTE(review): decorator restored — view was unreachable
def generate():
    """Run the Zero123Plus pipeline on a client-supplied image.

    Expects a JSON body: ``{"image": "<base64 PNG, optionally a data URI>"}``.
    Returns ``{"image": "data:image/png;base64,..."}`` on success, or a JSON
    ``{"error": ...}`` payload with status 400/500 on failure.
    """
    if pipe is None:
        return jsonify({"error": "Model not loaded"}), 500
    try:
        data = request.get_json()
        # get_json() can yield None (empty/non-JSON body); guard before .get()
        # so the client gets a 400 instead of an AttributeError-driven 500.
        image_data = data.get("image") if data else None
        if not image_data:
            return jsonify({"error": "No image provided"}), 400
        # Strip an optional data-URI header ("data:image/png;base64,").
        if image_data.startswith("data:image"):
            image_data = image_data.split(",")[1]
        image = Image.open(io.BytesIO(base64.b64decode(image_data))).convert("RGB")
        result = pipe(image)
        output_image = result.images[0]
        return jsonify({"image": f"data:image/png;base64,{pil_to_base64(output_image)}"})
    except Exception as e:
        logger.error(f"Error generating image: {e}")
        return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
    # Bind to all interfaces so the container exposes the service;
    # 7860 is the conventional Hugging Face Spaces port.
    app.run(host="0.0.0.0", port=7860)