mike23415 committed on
Commit
b7b284e
·
verified ·
1 Parent(s): 16f183a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -28
app.py CHANGED
@@ -3,7 +3,7 @@ import base64
3
  import torch
4
  import os
5
  from flask import Flask, request, jsonify
6
- from diffusers import StableDiffusionPipeline # Placeholder; adjust based on InstantMesh docs
7
  from PIL import Image
8
  import logging
9
 
@@ -12,17 +12,13 @@ logger = logging.getLogger(__name__)
12
 
13
  app = Flask(__name__)
14
 
15
- # Load the model once at startup (on CPU) with token from environment
16
  try:
17
- logger.info("Loading TencentARC InstantMesh pipeline...")
18
- token = os.getenv("HF_TOKEN")
19
- if not token:
20
- raise ValueError("HF_TOKEN environment variable not set")
21
  pipe = StableDiffusionPipeline.from_pretrained(
22
- "TencentARC/InstantMesh",
23
  torch_dtype=torch.float32,
24
  cache_dir="/tmp/hf_home",
25
- token=token, # Reintroduce token authentication
26
  )
27
  pipe.to("cpu")
28
  logger.info("=== Application Startup at CPU mode =====")
@@ -37,7 +33,7 @@ def pil_to_base64(image):
37
 
38
  @app.route("/")
39
  def home():
40
- return "TencentARC InstantMesh CPU API is running!"
41
 
42
  @app.route("/generate", methods=["POST"])
43
  def generate():
@@ -46,30 +42,20 @@ def generate():
46
 
47
  try:
48
  data = request.get_json()
49
- image_data = data.get("image")
50
 
51
- if not image_data:
52
- return jsonify({"error": "No image provided"}), 400
53
 
54
- if image_data.startswith("data:image"):
55
- image_data = image_data.split(",")[1]
 
56
 
57
- image = Image.open(io.BytesIO(base64.b64decode(image_data))).convert("RGB")
58
-
59
- logger.info("Processing image with pipeline...")
60
- result = pipe(image) # Adjust based on InstantMesh documentation
61
- output_mesh = result.mesh # Hypothetical; check InstantMesh output format
62
-
63
- output_path = "/tmp/output.glb"
64
- output_mesh.save(output_path)
65
- with open(output_path, "rb") as f:
66
- mesh_data = base64.b64encode(f.read()).decode("utf-8")
67
-
68
- logger.info("Mesh processed successfully")
69
- return jsonify({"mesh": f"data:model/gltf-binary;base64,{mesh_data}"})
70
 
71
  except Exception as e:
72
- logger.error(f"Error generating mesh: {e}", exc_info=True)
73
  return jsonify({"error": str(e)}), 500
74
 
75
  if __name__ == "__main__":
 
3
  import torch
4
  import os
5
  from flask import Flask, request, jsonify
6
+ from diffusers import StableDiffusionPipeline
7
  from PIL import Image
8
  import logging
9
 
 
12
 
13
  app = Flask(__name__)
14
 
15
+ # Load the model once at startup (on CPU)
16
  try:
17
+ logger.info("Loading runwayml/stable-diffusion-v1-5 pipeline...")
 
 
 
18
  pipe = StableDiffusionPipeline.from_pretrained(
19
+ "runwayml/stable-diffusion-v1-5",
20
  torch_dtype=torch.float32,
21
  cache_dir="/tmp/hf_home",
 
22
  )
23
  pipe.to("cpu")
24
  logger.info("=== Application Startup at CPU mode =====")
 
33
 
34
  @app.route("/")
35
  def home():
36
+ return "Stable Diffusion CPU API is running!"
37
 
38
  @app.route("/generate", methods=["POST"])
39
  def generate():
 
42
 
43
  try:
44
  data = request.get_json()
45
+ prompt = data.get("prompt") # Use text prompt instead of image
46
 
47
+ if not prompt:
48
+ return jsonify({"error": "No prompt provided"}), 400
49
 
50
+ logger.info("Generating image with pipeline...")
51
+ result = pipe(prompt) # Generate image from text
52
+ image = result.images[0] # Get the first generated image
53
 
54
+ logger.info("Image generated successfully")
55
+ return jsonify({"image": f"data:image/png;base64,{pil_to_base64(image)}"})
 
 
 
 
 
 
 
 
 
 
 
56
 
57
  except Exception as e:
58
+ logger.error(f"Error generating image: {e}", exc_info=True)
59
  return jsonify({"error": str(e)}), 500
60
 
61
  if __name__ == "__main__":