Update app.py
app.py CHANGED
@@ -1,11 +1,6 @@
 import os
 import logging
 from flask import Flask, request, jsonify, send_file
-from diffusers.pipelines import DiffusionPipeline
-try:
-    from diffusers.pipelines.zero123plus.pipeline_zero123plus import Zero123PlusPipeline
-except ImportError:
-    print("Zero123PlusPipeline not found in current diffusers version")
 import torch
 from PIL import Image
 import io
@@ -19,7 +14,6 @@ logging.basicConfig(
 logger = logging.getLogger(__name__)
 
 # Set Hugging Face cache directory to a writable path
-# Make sure to set this BEFORE importing or initializing any models
 os.environ['HF_HOME'] = '/tmp/hf_home'
 os.environ['XDG_CACHE_HOME'] = '/tmp/cache'
 
@@ -36,19 +30,28 @@ def load_model():
     global pipe
     try:
         logger.info("Loading Zero123Plus model...")
-        # …
-        pipe = DiffusionPipeline.from_pretrained(
+        # Import here to ensure the environment variables are set before import
+        from diffusers import AutoPipelineForImage2Image
+
+        # Use AutoPipelineForImage2Image which should work with any image-to-image model
+        pipe = AutoPipelineForImage2Image.from_pretrained(
             "sudo-ai/zero123plus-v1.2",
             torch_dtype=torch.float32,
             cache_dir="/tmp/diffusers_cache",
-            …
-            …
+            safety_checker=None,
+            low_cpu_mem_usage=True
         )
         pipe.to("cpu")
         logger.info("Model loaded successfully")
+        return True
     except Exception as e:
         logger.error(f"Error loading model: {str(e)}")
-        …
+        return False
+
+# Don't try to load the model at startup - we'll load it on the first request
+# This prevents the app from crashing if the model can't be loaded immediately
+
+app = Flask(__name__)
 
 # Load the model immediately
 load_model()
@@ -65,10 +68,8 @@ def generate():
     global pipe
     # Check if model is loaded
     if pipe is None:
-        try:
-            load_model()
-        except Exception as e:
-            logger.error(f"Failed to load model: {str(e)}")
+        success = load_model()
+        if not success:
             return jsonify({"error": "Failed to initialize model"}), 500
 
     if 'image' not in request.files:
@@ -84,7 +85,11 @@ def generate():
 
     logger.info(f"Starting image generation with {num_steps} steps")
     # Generate new views
-    result = pipe(input_image, num_inference_steps=num_steps)
+    result = pipe(
+        image=input_image,
+        num_inference_steps=num_steps
+    )
+
     output_image = result.images[0]
     logger.info(f"Generated image of size {output_image.size}")
 
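
A note on the new loading path: AutoPipelineForImage2Image resolves a pipeline class from the checkpoint's metadata, and it is not obvious it can resolve a community pipeline like Zero123Plus. For reference, the Zero123Plus model card loads the checkpoint through the generic DiffusionPipeline with a custom_pipeline argument, which also sidesteps the version-dependent Zero123PlusPipeline import this commit removes. A sketch of that route, adapted to this Space's CPU/float32 settings (the custom_pipeline repo id and the scheduler tweak are from the model card as I recall it; verify before relying on them):

```python
# Sketch: model-card style loading for Zero123Plus (unverified against this Space).
import torch
from diffusers import DiffusionPipeline, EulerAncestralDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained(
    "sudo-ai/zero123plus-v1.2",
    custom_pipeline="sudo-ai/zero123plus-pipeline",  # fetches the pipeline code at load time
    torch_dtype=torch.float32,                       # float32 to match this Space's CPU setup
    cache_dir="/tmp/diffusers_cache",
)
# The model card also swaps in a trailing-timestep Euler-ancestral scheduler:
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)
pipe.to("cpu")
```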
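
The added comments say the model should load on the first request, but the commit still calls load_model() at import time (the "# Load the model immediately" block survives). If that startup call is ever dropped, the first-request path in generate() also needs to be safe under concurrent requests. A minimal sketch of such a guard, building on the load_model() and pipe from this commit (get_pipe and _pipe_lock are hypothetical names, not part of this Space):

```python
# Hypothetical lazy-load guard; not part of the committed app.py.
import threading

_pipe_lock = threading.Lock()

def get_pipe():
    """Return the loaded pipeline, loading it on first use."""
    global pipe
    if pipe is None:
        with _pipe_lock:
            if pipe is None:  # re-check: another request may have loaded it meanwhile
                if not load_model():
                    raise RuntimeError("Failed to initialize model")
    return pipe
```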
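
On the request side, the handler expects the image as a multipart file field named "image" plus a step count. The hunks don't show the route decorator or where num_steps is parsed, so the URL, port, and form field below are assumptions:

```python
# Hypothetical client call; route path, port, and the num_steps field are assumed.
import requests

with open("input.png", "rb") as f:
    resp = requests.post(
        "http://localhost:7860/generate",  # 7860 is the usual HF Spaces port
        files={"image": f},                # matches request.files['image']
        data={"num_steps": "28"},
        timeout=600,                       # CPU inference is slow
    )
resp.raise_for_status()
with open("views.png", "wb") as out:
    out.write(resp.content)               # send_file should return raw image bytes
```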
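
On the output side, Zero123Plus returns its novel views tiled into a single image, which is what result.images[0] and the size logging above refer to. For v1.2 this should be a 2-column by 3-row grid of 320x320 tiles, though that layout is an assumption worth confirming against the logged output_image.size. A sketch for splitting the grid into individual views:

```python
# Sketch: split a tiled Zero123Plus result into per-view images.
# The 320px tile size and 2x3 layout are assumptions about v1.2 output.
from PIL import Image

def split_views(grid: Image.Image, tile: int = 320) -> list[Image.Image]:
    cols, rows = grid.width // tile, grid.height // tile
    return [
        grid.crop((c * tile, r * tile, (c + 1) * tile, (r + 1) * tile))
        for r in range(rows)
        for c in range(cols)
    ]
```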