mike23415 committed
Commit 7949d53 · verified · 1 Parent(s): 48056a7

Update app.py

Files changed (1): app.py +52 -19
app.py CHANGED
@@ -1,21 +1,42 @@
 import os
+import logging
 from flask import Flask, request, jsonify, send_file
 from diffusers import DiffusionPipeline
 import torch
 from PIL import Image
 import io
 
+# Configure logging to stdout instead of files
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[logging.StreamHandler()]
+)
+logger = logging.getLogger(__name__)
+
 # Set Hugging Face cache directory to a writable path
-os.environ['HF_HOME'] = '/home/user/.cache/huggingface'
+os.environ['HF_HOME'] = '/tmp/.cache/huggingface'
+
+# Create cache directory if it doesn't exist
+os.makedirs('/tmp/.cache/huggingface', exist_ok=True)
 
 app = Flask(__name__)
 
-# Load the model on CPU
-pipe = DiffusionPipeline.from_pretrained(
-    "sudo-ai/zero123plus-v1.2",
-    torch_dtype=torch.float32
-)
-pipe.to("cpu")
+# Initialize the model
+@app.before_first_request
+def load_model():
+    global pipe
+    try:
+        logger.info("Loading Zero123Plus model...")
+        pipe = DiffusionPipeline.from_pretrained(
+            "sudo-ai/zero123plus-v1.2",
+            torch_dtype=torch.float32
+        )
+        pipe.to("cpu")
+        logger.info("Model loaded successfully")
+    except Exception as e:
+        logger.error(f"Error loading model: {str(e)}")
+        raise
 
 @app.route("/", methods=["GET"])
 def index():
@@ -23,22 +44,34 @@ def index():
 
 @app.route("/generate", methods=["POST"])
 def generate():
-    if 'image' not in request.files:
-        return jsonify({"error": "No image uploaded"}), 400
+    try:
+        if 'image' not in request.files:
+            logger.warning("No image uploaded")
+            return jsonify({"error": "No image uploaded"}), 400
 
-    image_file = request.files['image']
-    input_image = Image.open(image_file).convert("RGB")
+        image_file = request.files['image']
+        input_image = Image.open(image_file).convert("RGB")
+        logger.info(f"Received image of size {input_image.size}")
 
-    # Generate new views
-    result = pipe(image=input_image, num_inference_steps=25)
-    output_image = result.images[0]
+        # Get optional parameters with defaults
+        num_steps = int(request.form.get('num_inference_steps', 25))
+
+        logger.info(f"Starting image generation with {num_steps} steps")
+        # Generate new views
+        result = pipe(image=input_image, num_inference_steps=num_steps)
+        output_image = result.images[0]
+        logger.info(f"Generated image of size {output_image.size}")
 
-    # Save to a BytesIO object
-    img_io = io.BytesIO()
-    output_image.save(img_io, 'PNG')
-    img_io.seek(0)
+        # Save to a BytesIO object
+        img_io = io.BytesIO()
+        output_image.save(img_io, 'PNG')
+        img_io.seek(0)
 
-    return send_file(img_io, mimetype='image/png')
+        return send_file(img_io, mimetype='image/png')
+
+    except Exception as e:
+        logger.error(f"Error during generation: {str(e)}")
+        return jsonify({"error": str(e)}), 500
 
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
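
With this commit, /generate takes the upload under the `image` file field plus an optional `num_inference_steps` form value (defaulting to 25 on the server) and returns a PNG. A minimal client sketch; the host/port and the input.png / view.png file names are illustrative assumptions, not part of the commit:

    # Sketch: call the updated /generate endpoint with an optional
    # step count. Host, port, and file names are assumptions.
    import requests

    with open("input.png", "rb") as f:
        resp = requests.post(
            "http://localhost:7860/generate",
            files={"image": f},
            data={"num_inference_steps": "20"},  # optional; server defaults to 25
        )

    if resp.ok:
        # The server streams the generated view back as PNG bytes
        with open("view.png", "wb") as out:
            out.write(resp.content)
    else:
        # 400/500 responses carry a JSON body: {"error": ...}
        print(resp.status_code, resp.json()["error"])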
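
One compatibility note on the new loading hook: `@app.before_first_request` was deprecated in Flask 2.2 and removed in Flask 2.3, so on current Flask versions this deferred load would fail at import time. A minimal sketch of an equivalent lazy one-time load; the `get_pipe` helper is hypothetical and not part of this commit:

    # Sketch: lazy one-time model load for Flask >= 2.3, where
    # @app.before_first_request no longer exists. get_pipe() is a
    # hypothetical helper, not part of this commit.
    pipe = None

    def get_pipe():
        global pipe
        if pipe is None:
            logger.info("Loading Zero123Plus model...")
            pipe = DiffusionPipeline.from_pretrained(
                "sudo-ai/zero123plus-v1.2",
                torch_dtype=torch.float32,
            )
            pipe.to("cpu")
        return pipe

Under that pattern, generate() would call get_pipe() instead of reading the global directly, keeping the same load-on-first-request behavior.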