dkatz2391 committed on
Commit
e68e0ca
·
verified ·
1 Parent(s): 434fa76

Added logging: clear start and end markers for the function call, plus logging of input parameters.

Browse files
Files changed (1) hide show
  1. app.py +89 -22
app.py CHANGED
@@ -17,6 +17,7 @@ from trellis.utils import render_utils, postprocessing_utils
17
 
18
  import traceback
19
  import sys
 
20
 
21
 
22
  # Add JSON encoder for NumPy arrays
@@ -270,8 +271,8 @@ def generate_and_extract_glb(
270
  slat_sampling_steps: int,
271
  mesh_simplify: float, # Added from extract_glb
272
  texture_size: int, # Added from extract_glb
273
- req: gr.Request,
274
- ) -> str: # MODIFIED: Returns only the final GLB path string
275
  """
276
  Combines 3D model generation and GLB extraction into a single step
277
  for API usage, avoiding the need to transfer the state object.
@@ -288,17 +289,30 @@ def generate_and_extract_glb(
288
  req (gr.Request): Gradio request object.
289
 
290
  Returns:
291
- str: The absolute path to the generated GLB file within the Space's filesystem.
292
- Returns None if any step fails.
293
  """
294
- user_dir = os.path.join(TMP_DIR, str(req.session_hash))
295
- os.makedirs(user_dir, exist_ok=True)
 
 
 
 
 
 
 
 
296
 
297
- print(f"[{req.session_hash}] API: Starting combined generation and extraction for prompt: {prompt}")
 
 
 
 
298
 
299
  # --- Step 1: Generate 3D Model (adapted from text_to_3d) ---
300
  try:
301
- print(f"[{req.session_hash}] API: Running generation pipeline...")
 
302
  outputs = pipeline.run(
303
  prompt,
304
  seed=seed,
@@ -312,34 +326,87 @@ def generate_and_extract_glb(
312
  "cfg_strength": slat_guidance_strength,
313
  },
314
  )
315
- # Keep handles to the direct outputs (no need to pack/unpack state)
 
 
 
 
 
 
 
316
  gs_output = outputs['gaussian'][0]
317
  mesh_output = outputs['mesh'][0]
318
- print(f"[{req.session_hash}] API: Generation pipeline completed.")
 
 
 
 
 
 
319
  except Exception as e:
320
- print(f"[{req.session_hash}] API: ERROR during generation pipeline: {e}")
321
- traceback.print_exc()
322
- torch.cuda.empty_cache()
 
 
 
 
 
 
323
  return None # Return None on failure
324
 
325
  # --- Step 2: Extract GLB (adapted from extract_glb) ---
 
326
  try:
327
- print(f"[{req.session_hash}] API: Extracting GLB (simplify={mesh_simplify}, texture={texture_size})...")
 
 
 
 
 
 
328
  # Directly use the outputs from the pipeline
329
  glb = postprocessing_utils.to_glb(gs_output, mesh_output, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
330
- glb_path = os.path.join(user_dir, 'api_generated_sample.glb') # Use a distinct name for API outputs
331
- print(f"[{req.session_hash}] API: Saving GLB to {glb_path}")
 
 
 
 
 
 
 
 
332
  glb.export(glb_path)
333
- print(f"[{req.session_hash}] API: GLB extraction completed.")
 
 
 
334
  except Exception as e:
335
- print(f"[{req.session_hash}] API: ERROR during GLB extraction: {e}")
 
336
  traceback.print_exc()
337
- torch.cuda.empty_cache()
 
 
 
 
 
338
  return None # Return None on failure
339
 
340
- torch.cuda.empty_cache()
341
- print(f"[{req.session_hash}] API: Combined process successful. Returning GLB path: {glb_path}")
342
- return glb_path # Return only the path to the generated GLB
 
 
 
 
 
 
 
 
 
 
343
  # --- END NEW COMBINED API FUNCTION ---
344
 
345
 
 
17
 
18
  import traceback
19
  import sys
20
+ import time
21
 
22
 
23
  # Add JSON encoder for NumPy arrays
 
271
  slat_sampling_steps: int,
272
  mesh_simplify: float, # Added from extract_glb
273
  texture_size: int, # Added from extract_glb
274
+ req: gr.Request, # Keep req for potential session info if needed
275
+ ) -> Optional[str]: # MODIFIED: Explicitly show it can return None
276
  """
277
  Combines 3D model generation and GLB extraction into a single step
278
  for API usage, avoiding the need to transfer the state object.
 
289
  req (gr.Request): Gradio request object.
290
 
291
  Returns:
292
+ Optional[str]: The absolute path to the generated GLB file within the Space's filesystem,
293
+ or None if any step fails.
294
  """
295
+ session_hash = "API_CALL" # Use a generic identifier for API calls if req is None or lacks session
296
+ if req and hasattr(req, 'session_hash') and req.session_hash:
297
+ session_hash = req.session_hash
298
+
299
+ user_dir = os.path.join(TMP_DIR, str(session_hash))
300
+ try:
301
+ os.makedirs(user_dir, exist_ok=True)
302
+ except Exception as e:
303
+ print(f"[{session_hash}] API: ERROR creating directory {user_dir}: {e}")
304
+ return None # Cannot proceed without directory
305
 
306
+ print(f"[{session_hash}] API: ===== generate_and_extract_glb START ====")
307
+ print(f"[{session_hash}] API: Prompt: '{prompt}', Seed: {seed}, Simplify: {mesh_simplify}, Texture: {texture_size}")
308
+
309
+ gs_output = None
310
+ mesh_output = None
311
 
312
  # --- Step 1: Generate 3D Model (adapted from text_to_3d) ---
313
  try:
314
+ print(f"[{session_hash}] API: Step 1 - Running generation pipeline...")
315
+ t_start_gen = time.time() # Add timing
316
  outputs = pipeline.run(
317
  prompt,
318
  seed=seed,
 
326
  "cfg_strength": slat_guidance_strength,
327
  },
328
  )
329
+ t_end_gen = time.time()
330
+ print(f"[{session_hash}] API: Step 1 - Generation pipeline completed in {t_end_gen - t_start_gen:.2f}s.")
331
+
332
+ # Validate outputs immediately
333
+ if not outputs or 'gaussian' not in outputs or not outputs['gaussian'] or 'mesh' not in outputs or not outputs['mesh']:
334
+ print(f"[{session_hash}] API: ERROR - Pipeline output is missing expected keys or values.")
335
+ return None
336
+
337
  gs_output = outputs['gaussian'][0]
338
  mesh_output = outputs['mesh'][0]
339
+
340
+ if gs_output is None or mesh_output is None:
341
+ print(f"[{session_hash}] API: ERROR - Pipeline returned None for gs_output or mesh_output.")
342
+ return None
343
+
344
+ print(f"[{session_hash}] API: Step 1 - Outputs obtained (gs type: {type(gs_output)}, mesh type: {type(mesh_output)}).")
345
+
346
  except Exception as e:
347
+ print(f"[{session_hash}] API: ERROR during generation pipeline step: {e}")
348
+ # Print detailed traceback
349
+ traceback.print_exc()
350
+ # Clean up CUDA memory before returning
351
+ try:
352
+ torch.cuda.empty_cache()
353
+ print(f"[{session_hash}] API: CUDA cache cleared after generation error.")
354
+ except Exception as cache_e:
355
+ print(f"[{session_hash}] API: Error clearing CUDA cache after generation error: {cache_e}")
356
  return None # Return None on failure
357
 
358
  # --- Step 2: Extract GLB (adapted from extract_glb) ---
359
+ glb_path = None # Initialize glb_path
360
  try:
361
+ print(f"[{session_hash}] API: Step 2 - Extracting GLB (simplify={mesh_simplify}, texture={texture_size})...")
362
+ # Check if inputs from previous step are valid
363
+ if gs_output is None or mesh_output is None:
364
+ print(f"[{session_hash}] API: ERROR - Cannot proceed with GLB extraction, gs_output or mesh_output is None.")
365
+ return None
366
+
367
+ t_start_glb = time.time()
368
  # Directly use the outputs from the pipeline
369
  glb = postprocessing_utils.to_glb(gs_output, mesh_output, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
370
+ t_end_glb = time.time()
371
+ print(f"[{session_hash}] API: Step 2 - GLB object created in {t_end_glb - t_start_glb:.2f}s.")
372
+
373
+ if glb is None:
374
+ print(f"[{session_hash}] API: ERROR - postprocessing_utils.to_glb returned None.")
375
+ return None
376
+
377
+ glb_path = os.path.join(user_dir, f'api_generated_{session_hash}.glb') # Use unique name
378
+ print(f"[{session_hash}] API: Step 2 - Saving GLB to {glb_path}...")
379
+ t_start_save = time.time()
380
  glb.export(glb_path)
381
+ t_end_save = time.time()
382
+ print(f"[{session_hash}] API: Step 2 - GLB saved in {t_end_save - t_start_save:.2f}s.")
383
+ print(f"[{session_hash}] API: Step 2 - GLB extraction completed successfully.")
384
+
385
  except Exception as e:
386
+ print(f"[{session_hash}] API: ERROR during GLB extraction step: {e}")
387
+ # Print detailed traceback
388
  traceback.print_exc()
389
+ # Clean up CUDA memory before returning
390
+ try:
391
+ torch.cuda.empty_cache()
392
+ print(f"[{session_hash}] API: CUDA cache cleared after extraction error.")
393
+ except Exception as cache_e:
394
+ print(f"[{session_hash}] API: Error clearing CUDA cache after extraction error: {cache_e}")
395
  return None # Return None on failure
396
 
397
+ # --- Final Cleanup and Return ---
398
+ try:
399
+ torch.cuda.empty_cache()
400
+ print(f"[{session_hash}] API: Final CUDA cache cleared.")
401
+ except Exception as cache_e:
402
+ print(f"[{session_hash}] API: Error clearing final CUDA cache: {cache_e}")
403
+
404
+ if glb_path and os.path.exists(glb_path):
405
+ print(f"[{session_hash}] API: ===== generate_and_extract_glb END (Success) ===== Returning GLB path: {glb_path}")
406
+ return glb_path # Return only the path to the generated GLB
407
+ else:
408
+ print(f"[{session_hash}] API: ===== generate_and_extract_glb END (Failure) ===== GLB path not generated or does not exist.")
409
+ return None # Ensure None is returned if glb_path wasn't set or file doesn't exist
410
  # --- END NEW COMBINED API FUNCTION ---
411
 
412