mike23415 committed on
Commit
3831488
·
verified ·
1 Parent(s): 583e56c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -14
app.py CHANGED
@@ -7,20 +7,20 @@ from PIL import Image
7
  from io import BytesIO
8
  import base64
9
 
10
- # Setup logging
11
  logging.basicConfig(level=logging.INFO)
12
  logger = logging.getLogger(__name__)
13
 
14
- # Initialize Flask app
15
  app = Flask(__name__)
16
 
17
- # Load the pipeline (CPU, no fp16 variant)
18
  logger.info("Loading Zero123Plus pipeline...")
19
  try:
20
  pipe = DiffusionPipeline.from_pretrained(
21
  "sudo-ai/zero123plus-v1.2",
22
  torch_dtype=torch.float32,
23
- variant=None,
24
  )
25
  pipe.to("cpu")
26
  logger.info("Pipeline loaded successfully.")
@@ -38,28 +38,31 @@ def generate():
38
  return jsonify({"error": "Model not loaded"}), 500
39
 
40
  try:
41
- image_data = request.json.get("image")
 
 
42
  if not image_data:
43
  return jsonify({"error": "No image provided"}), 400
44
 
45
- # Decode the base64 image
46
  image = Image.open(BytesIO(base64.b64decode(image_data.split(",")[-1]))).convert("RGB")
47
 
48
- # Run the pipeline
49
  logger.info("Generating 3D views...")
50
  output = pipe(image)
51
- images = output.images # List of generated views
52
 
53
- # Return the first image as base64
54
  buffered = BytesIO()
55
- images[0].save(buffered, format="PNG")
56
- img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
57
- return jsonify({"image": f"data:image/png;base64,{img_str}"})
58
 
 
 
59
  except Exception as e:
60
- logger.error(f"Inference error: {e}")
61
  return jsonify({"error": str(e)}), 500
62
 
63
  if __name__ == "__main__":
64
- logger.info("=== Application Startup at {} =====".format(torch.cuda.utilization().get('time') if torch.cuda.is_available() else "CPU mode"))
65
  app.run(host="0.0.0.0", port=7860)
 
7
  from io import BytesIO
8
  import base64
9
 
10
+ # Logging
11
  logging.basicConfig(level=logging.INFO)
12
  logger = logging.getLogger(__name__)
13
 
14
+ # Flask app
15
  app = Flask(__name__)
16
 
17
+ # Load Zero123Plus pipeline (for CPU)
18
  logger.info("Loading Zero123Plus pipeline...")
19
  try:
20
  pipe = DiffusionPipeline.from_pretrained(
21
  "sudo-ai/zero123plus-v1.2",
22
  torch_dtype=torch.float32,
23
+ variant=None, # avoid fp16 issues
24
  )
25
  pipe.to("cpu")
26
  logger.info("Pipeline loaded successfully.")
 
38
  return jsonify({"error": "Model not loaded"}), 500
39
 
40
  try:
41
+ data = request.get_json()
42
+ image_data = data.get("image")
43
+
44
  if not image_data:
45
  return jsonify({"error": "No image provided"}), 400
46
 
47
+ # Decode base64 to PIL image
48
  image = Image.open(BytesIO(base64.b64decode(image_data.split(",")[-1]))).convert("RGB")
49
 
50
+ # Run inference
51
  logger.info("Generating 3D views...")
52
  output = pipe(image)
53
+ generated_image = output.images[0]
54
 
55
+ # Convert output to base64
56
  buffered = BytesIO()
57
+ generated_image.save(buffered, format="PNG")
58
+ img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
 
59
 
60
+ return jsonify({"image": f"data:image/png;base64,{img_base64}"})
61
+
62
  except Exception as e:
63
+ logger.error(f"Generation failed: {e}")
64
  return jsonify({"error": str(e)}), 500
65
 
66
  if __name__ == "__main__":
67
+ logger.info("=== Application Startup at CPU mode =====")
68
  app.run(host="0.0.0.0", port=7860)