mike23415 committed on
Commit
b83bede
·
verified ·
1 Parent(s): accafc3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -38
app.py CHANGED
@@ -1,10 +1,7 @@
1
  import io
2
  import base64
3
- import torch
4
  import os
5
  from flask import Flask, request, jsonify
6
- from diffusers import StableDiffusionPipeline # Adjust if needed based on ldm
7
- from PIL import Image
8
  import logging
9
 
10
  logging.basicConfig(level=logging.INFO)
@@ -12,50 +9,27 @@ logger = logging.getLogger(__name__)
12
 
13
  app = Flask(__name__)
14
 
15
- # Load the model once at startup (on CPU)
16
- try:
17
- logger.info("Loading CompVis/ldm-stable-diffusion-v1 pipeline...")
18
- pipe = StableDiffusionPipeline.from_pretrained(
19
- "CompVis/ldm-stable-diffusion-v1",
20
- torch_dtype=torch.float32,
21
- cache_dir="/tmp/hf_home",
22
- )
23
- pipe.to("cpu")
24
- logger.info("=== Application Startup at CPU mode =====")
25
- except Exception as e:
26
- logger.error(f"Error loading model: {e}", exc_info=True)
27
- pipe = None
28
-
29
- def pil_to_base64(image):
30
- buffer = io.BytesIO()
31
- image.save(buffer, format="PNG")
32
- return base64.b64encode(buffer.getvalue()).decode("utf-8")
33
-
34
  @app.route("/")
35
  def home():
36
- return "CompVis ldm-stable-diffusion-v1 CPU API is running!"
37
 
38
  @app.route("/generate", methods=["POST"])
39
  def generate():
40
- if pipe is None:
41
- return jsonify({"error": "Model not loaded"}), 500
42
-
43
  try:
44
- data = request.get_json()
45
- prompt = data.get("prompt") # Use text prompt for 2D image generation
46
-
47
- if not prompt:
48
- return jsonify({"error": "No prompt provided"}), 400
49
-
50
- logger.info("Generating image with pipeline...")
51
- result = pipe(prompt) # Generate image from text
52
- image = result.images[0] # Get the first generated image
53
 
54
- logger.info("Image generated successfully")
55
- return jsonify({"image": f"data:image/png;base64,{pil_to_base64(image)}"})
 
56
 
 
 
57
  except Exception as e:
58
- logger.error(f"Error generating image: {e}", exc_info=True)
59
  return jsonify({"error": str(e)}), 500
60
 
61
  if __name__ == "__main__":
 
1
  import io
2
  import base64
 
3
  import os
4
  from flask import Flask, request, jsonify
 
 
5
  import logging
6
 
7
  logging.basicConfig(level=logging.INFO)
 
9
 
10
  app = Flask(__name__)
11
 
12
+ # Serve precomputed .glb file
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
@app.route("/")
def home():
    """Health-check endpoint: confirms the API process is serving requests."""
    status_message = "Precomputed 3D Model API is running!"
    return status_message
16
 
17
@app.route("/generate", methods=["POST"])
def generate():
    """Serve a precomputed binary glTF (.glb) model as a base64 data URI.

    No request body is consumed; the endpoint always returns the same
    precomputed asset.

    Returns:
        200 JSON {"mesh": "data:model/gltf-binary;base64,..."} on success.
        404 JSON {"error": ...} when the model file is missing on disk.
        500 JSON {"error": ...} on any unexpected failure.
    """
    try:
        # Default asset location inside the Space container; the GLB_PATH
        # environment variable lets a deployment relocate the asset without
        # a code change (backward compatible: default is unchanged).
        glb_path = os.environ.get(
            "GLB_PATH", os.path.join("/app", "data", "sample_model.glb")
        )
        if not os.path.exists(glb_path):
            return jsonify({"error": "Precomputed model not found"}), 404

        logger.info("Serving precomputed .glb file...")
        # Read the whole binary file and base64-encode it so it can be
        # embedded in a JSON response as a data URI.
        with open(glb_path, "rb") as f:
            mesh_data = base64.b64encode(f.read()).decode("utf-8")

        logger.info("Mesh served successfully")
        return jsonify({"mesh": f"data:model/gltf-binary;base64,{mesh_data}"})
    except Exception as e:
        # Top-level boundary handler: log with traceback and report the
        # failure to the client rather than leaking a bare 500 page.
        logger.error(f"Error serving mesh: {e}", exc_info=True)
        return jsonify({"error": str(e)}), 500
34
 
35
  if __name__ == "__main__":