mac9087 committed on
Commit
e5edf92
·
verified ·
1 Parent(s): 06031b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -10
app.py CHANGED
@@ -5,26 +5,46 @@ from werkzeug.utils import secure_filename
5
  from PIL import Image
6
  import io
7
  import zipfile
 
 
8
  from diffusers import ShapEImg2ImgPipeline
9
  from diffusers.utils import export_to_obj
10
 
11
  app = Flask(__name__)
12
 
13
- # Configure upload folder
14
- UPLOAD_FOLDER = 'uploads'
15
- RESULTS_FOLDER = 'results'
 
16
  ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
17
 
 
18
  os.makedirs(UPLOAD_FOLDER, exist_ok=True)
19
  os.makedirs(RESULTS_FOLDER, exist_ok=True)
 
 
 
 
 
 
20
 
21
  app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
22
  app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max
23
 
24
- # Initialize the model (will download on first run)
25
  device = "cuda" if torch.cuda.is_available() else "cpu"
26
- pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
27
- pipe = pipe.to(device)
 
 
 
 
 
 
 
 
 
 
28
 
29
  def allowed_file(filename):
30
  return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@@ -64,6 +84,9 @@ def convert_image_to_3d():
64
  # Open image
65
  image = Image.open(filepath).convert("RGB")
66
 
 
 
 
67
  # Generate 3D model
68
  images = pipe(
69
  image,
@@ -73,7 +96,6 @@ def convert_image_to_3d():
73
  ).images
74
 
75
  # Create unique output directory
76
- import uuid
77
  output_id = str(uuid.uuid4())
78
  output_dir = os.path.join(RESULTS_FOLDER, output_id)
79
  os.makedirs(output_dir, exist_ok=True)
@@ -110,7 +132,9 @@ def convert_image_to_3d():
110
  return send_file(glb_path, as_attachment=True, download_name="model.glb")
111
 
112
  except Exception as e:
113
- return jsonify({"error": str(e)}), 500
 
 
114
 
115
  @app.route('/', methods=['GET'])
116
  def index():
@@ -162,11 +186,13 @@ def index():
162
  <li><code>output_format</code>: "obj" or "glb" (default: "obj")</li>
163
  </ul>
164
  <p>Example curl request:</p>
165
- <pre>curl -X POST -F "image=@your_image.jpg" -F "output_format=obj" http://localhost:5000/convert -o model.zip</pre>
166
  </div>
167
  </body>
168
  </html>
169
  """
170
 
171
  if __name__ == '__main__':
172
- app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
 
 
 
5
  from PIL import Image
6
  import io
7
  import zipfile
8
+ import uuid
9
+ import traceback
10
  from diffusers import ShapEImg2ImgPipeline
11
  from diffusers.utils import export_to_obj
12
 
13
  app = Flask(__name__)
14
 
15
# --- Runtime configuration -------------------------------------------------
# Hugging Face Spaces only guarantees /tmp is writable, so all working
# directories and model caches live there.
UPLOAD_FOLDER = '/tmp/uploads'
RESULTS_FOLDER = '/tmp/results'
CACHE_DIR = '/tmp/huggingface'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

# Ensure every working directory exists before the first request arrives.
for _folder in (UPLOAD_FOLDER, RESULTS_FOLDER, CACHE_DIR):
    os.makedirs(_folder, exist_ok=True)

# Point every Hugging Face cache location at the writable directory.
os.environ['HF_HOME'] = CACHE_DIR
os.environ['TRANSFORMERS_CACHE'] = os.path.join(CACHE_DIR, 'transformers')
os.environ['HF_DATASETS_CACHE'] = os.path.join(CACHE_DIR, 'datasets')

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max

# The pipeline is loaded lazily (see load_model) so startup stays fast;
# until first use the module-level handle stays None.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = None
37
+
38
def load_model():
    """Lazily create the Shap-E image-to-3D pipeline on first use.

    Returns the module-level ``pipe``; if it has not been initialized yet,
    downloads/loads the weights (into CACHE_DIR) and moves the pipeline to
    ``device`` before returning it.
    """
    global pipe
    if pipe is not None:
        return pipe
    # half precision only makes sense on GPU; CPU inference needs float32
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = ShapEImg2ImgPipeline.from_pretrained(
        "openai/shap-e-img2img",
        torch_dtype=dtype,
        cache_dir=CACHE_DIR,  # Explicitly set cache directory
    ).to(device)
    return pipe
48
 
49
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
 
84
  # Open image
85
  image = Image.open(filepath).convert("RGB")
86
 
87
+ # Load model (lazy loading)
88
+ pipe = load_model()
89
+
90
  # Generate 3D model
91
  images = pipe(
92
  image,
 
96
  ).images
97
 
98
  # Create unique output directory
 
99
  output_id = str(uuid.uuid4())
100
  output_dir = os.path.join(RESULTS_FOLDER, output_id)
101
  os.makedirs(output_dir, exist_ok=True)
 
132
  return send_file(glb_path, as_attachment=True, download_name="model.glb")
133
 
134
  except Exception as e:
135
+ # Enhanced error reporting with traceback
136
+ error_details = traceback.format_exc()
137
+ return jsonify({"error": str(e), "details": error_details}), 500
138
 
139
  @app.route('/', methods=['GET'])
140
  def index():
 
186
  <li><code>output_format</code>: "obj" or "glb" (default: "obj")</li>
187
  </ul>
188
  <p>Example curl request:</p>
189
+ <pre>curl -X POST -F "image=@your_image.jpg" -F "output_format=obj" http://localhost:7860/convert -o model.zip</pre>
190
  </div>
191
  </body>
192
  </html>
193
  """
194
 
195
if __name__ == '__main__':
    # Hugging Face Spaces routes traffic to port 7860 by default; honor an
    # explicit PORT override when one is set in the environment.
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))