LTT committed
Commit e87b20d · verified · 1 parent: f7222ee

Update app.py

Files changed (1):
  app.py  +11 -11
app.py CHANGED

@@ -4,7 +4,6 @@ import subprocess
 import shlex
 import spaces
 import torch
-import numpy as numpy
 access_token = os.getenv("HUGGINGFACE_TOKEN")
 subprocess.run(
     shlex.split(
@@ -97,15 +96,15 @@ isomer_color_weights = torch.from_numpy(np.array([1, 0.5, 1, 0.5])).float().to(device_0)
 
 # model initialization and loading
 # flux
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device_0)
-good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=access_token).to(device_0)
-# flux_pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, token=access_token).to(device=device_0, dtype=torch.bfloat16)
-flux_pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, vae=taef1, token=access_token).to(device_0)
-flux_lora_ckpt_path = hf_hub_download(repo_id="LTT/xxx-ckpt", filename="rgb_normal_large.safetensors", repo_type="model")
-flux_pipe.load_lora_weights(flux_lora_ckpt_path)
-# flux_pipe.to(device=device_0, dtype=torch.bfloat16)
-torch.cuda.empty_cache()
-flux_pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(flux_pipe)
+# taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device_0)
+# good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=access_token).to(device_0)
+# # flux_pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, token=access_token).to(device=device_0, dtype=torch.bfloat16)
+# flux_pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, vae=taef1, token=access_token).to(device_0)
+# flux_lora_ckpt_path = hf_hub_download(repo_id="LTT/xxx-ckpt", filename="rgb_normal_large.safetensors", repo_type="model")
+# flux_pipe.load_lora_weights(flux_lora_ckpt_path)
+# # flux_pipe.to(device=device_0, dtype=torch.bfloat16)
+# torch.cuda.empty_cache()
+# flux_pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(flux_pipe)
 
 
 # lrm
@@ -308,7 +307,8 @@ def reconstruct_3d_model(images, prompt):
 @spaces.GPU
 def gradio_pipeline(prompt, seed):
     # Generate multi-view images
-    rgb_normal_grid = generate_multi_view_images(prompt, seed)
+    # rgb_normal_grid = generate_multi_view_images(prompt, seed)
+    rgb_normal_grid = np.load("rgb_normal_grid.npy")
     image_preview = Image.fromarray((rgb_normal_grid * 255).astype(np.uint8))
 
     # 3d reconstruction
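With this change, gradio_pipeline no longer calls generate_multi_view_images at request time; it loads a precomputed grid from rgb_normal_grid.npy instead. Below is a minimal sketch of the one-off precompute step such a setup assumes: the generator is run once and its output cached with np.save. The prompt, seed, and the float [0, 1] value range (implied by the (rgb_normal_grid * 255).astype(np.uint8) preview) are assumptions for illustration, not taken from app.py.

import numpy as np

# Hypothetical one-off precompute: run the app's multi-view generator once
# and cache the grid so gradio_pipeline can np.load() it at request time.
# generate_multi_view_images comes from app.py; the prompt and seed are examples.
rgb_normal_grid = generate_multi_view_images("a ceramic mug", 42)

# The preview code multiplies by 255 and casts to uint8, so the grid is
# assumed to hold floats in [0, 1]; clamp before saving.
rgb_normal_grid = np.clip(np.asarray(rgb_normal_grid, dtype=np.float32), 0.0, 1.0)

np.save("rgb_normal_grid.npy", rgb_normal_grid)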
 
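Separately, the now commented-out line flux_pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(flux_pipe) relies on Python's descriptor protocol: calling __get__ on a plain function with an instance returns a method bound to that instance, so the attached function receives the pipeline as self. A small self-contained sketch of that binding pattern (the class and function names here are illustrative, not from app.py):

class Pipeline:
    def __init__(self):
        self.num_steps = 3

def call_that_yields_images(self, prompt):
    # self is the Pipeline instance because __get__ bound the function to it
    for step in range(self.num_steps):
        yield f"{prompt} (step {step})"

pipe = Pipeline()
# Bind the free function to this specific instance, mirroring the flux_pipe pattern
pipe.call_that_yields_images = call_that_yields_images.__get__(pipe)

for frame in pipe.call_that_yields_images("demo prompt"):
    print(frame)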