"""Flask API exposing the Zero123Plus diffusion model for novel-view generation."""
import os
from flask import Flask, request, jsonify, send_file
from diffusers import DiffusionPipeline
import torch
from PIL import Image
import io
# Redirect the Hugging Face cache to a writable path (container home dirs on
# Spaces are often read-only outside /home/user).
# NOTE(review): huggingface_hub reads HF_HOME into module constants at import
# time, and diffusers is imported above — this assignment may arrive too late
# to take effect. Consider setting HF_HOME before the diffusers import; verify.
os.environ['HF_HOME'] = '/home/user/.cache/huggingface'
app = Flask(__name__)
# Load the Zero123Plus pipeline once at startup; float32 because CPU inference
# (no half-precision benefit without a GPU). This download/load can take minutes.
pipe = DiffusionPipeline.from_pretrained(
"sudo-ai/zero123plus-v1.2",
torch_dtype=torch.float32
)
pipe.to("cpu")
@app.route("/", methods=["GET"])
def index():
    """Health-check endpoint: confirms the service is reachable."""
    status = {"message": "Zero123Plus API is running."}
    return jsonify(status)
@app.route("/generate", methods=["POST"])
def generate():
    """Generate novel views of an uploaded image with Zero123Plus.

    Expects a multipart/form-data POST carrying an ``image`` file field.
    Returns the first generated view as a PNG response, or a JSON error
    with HTTP 400 when the upload is missing or not a decodable image.
    """
    if 'image' not in request.files:
        return jsonify({"error": "No image uploaded"}), 400
    image_file = request.files['image']
    # A corrupt or non-image upload previously escaped as a 500; report it as a
    # client error instead. PIL's UnidentifiedImageError subclasses OSError,
    # so this narrow handler covers it without a new import.
    try:
        input_image = Image.open(image_file).convert("RGB")
    except OSError:
        return jsonify({"error": "Uploaded file is not a valid image"}), 400
    # Run the diffusion pipeline (CPU; 25 denoising steps).
    result = pipe(image=input_image, num_inference_steps=25)
    output_image = result.images[0]
    # Serve the PNG straight from memory — no temp file on disk.
    img_io = io.BytesIO()
    output_image.save(img_io, 'PNG')
    img_io.seek(0)
    return send_file(img_io, mimetype='image/png')
if __name__ == "__main__":
    # Bind on all interfaces; port 7860 is the Hugging Face Spaces convention.
    # (Removed a stray trailing "|" artifact that made this line a syntax error.)
    app.run(host="0.0.0.0", port=7860)