thewhole committed on
Commit
95a9966
·
1 Parent(s): b5f67e7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -7
app.py CHANGED
@@ -30,8 +30,20 @@ example_outputs_1 = [
30
  gr.Video(value=os.path.join(os.path.dirname(__file__), 'example/flamethrower,_with_fire,_scifi,_cyberpunk,_photorealistic,_8K,_HD.mp4'), autoplay=True)
31
  ]
32
 
 
 
 
 
 
 
 
 
 
 
33
 
34
-
 
 
35
  def main(prompt, iteration,CFG, seed):
36
  if [prompt] in example_inputs:
37
  return example_outputs_1[example_inputs.index([prompt])]
@@ -49,11 +61,12 @@ def main(prompt, iteration,CFG, seed):
49
  return gr.Video(value=path, autoplay=True)
50
 
51
  with gr.Blocks() as demo:
52
- gr.Markdown("# <center>LucidDreamer: Towards High-Fidelity Text-to-3D Generation via Interval Score Matching</center>")
53
- gr.Markdown("This live demo allows you to generate high-quality 3D content using text prompts. The outputs are 360° rendered 3d gaussian video and training progress visualization.<br> \
54
- It is based on Stable Diffusion 2.1. Please check out our <strong><a href=https://github.com/EnVision-Research/LucidDreamer>Project Page</a> / <a href=https://arxiv.org/abs/2311.11284>Paper</a> / <a href=https://github.com/EnVision-Research/LucidDreamer>Code</a></strong> if you want to learn more about our method!<br> \
55
- Note that this demo is running on A10G, the running time might be longer than the reported 35 minutes (5000 iterations) on A100.<br> \
56
- &copy; This Gradio space was developed by Haodong LI.")
 
57
  gr.Interface(fn=main, inputs=[gr.Textbox(lines=2, value="A portrait of IRONMAN, white hair, head, photorealistic, 8K, HDR.", label="Your prompt"),
58
  gr.Slider(0, 2000, value=1200, label="Number of iteration"),
59
  gr.Slider(80, 200, value=100, label="CFG"),
@@ -62,4 +75,4 @@ with gr.Blocks() as demo:
62
  examples=example_inputs,
63
  cache_examples=True,
64
  concurrency_limit=1)
65
- demo.launch()
 
30
  gr.Video(value=os.path.join(os.path.dirname(__file__), 'example/flamethrower,_with_fire,_scifi,_cyberpunk,_photorealistic,_8K,_HD.mp4'), autoplay=True)
31
  ]
32
 
33
+ from shap_e.diffusion.sample import sample_latents
34
+ from shap_e.diffusion.gaussian_diffusion import diffusion_from_config as diffusion_from_config_shape
35
+ from shap_e.models.download import load_model, load_config
36
+ from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
37
+ from shap_e.util.notebooks import decode_latent_mesh
38
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
39
+ xm = load_model('transmitter', device=device)
40
+ model = load_model('text300M', device=device)
41
+ # model.load_state_dict(torch.load('./load/shapE_finetuned_with_330kdata.pth', map_location=device)['model_state_dict'])
42
+ diffusion = diffusion_from_config_shape(load_config('diffusion'))
43
 
44
+ del xm
45
+ del model
46
+ del diffusion
47
  def main(prompt, iteration,CFG, seed):
48
  if [prompt] in example_inputs:
49
  return example_outputs_1[example_inputs.index([prompt])]
 
61
  return gr.Video(value=path, autoplay=True)
62
 
63
  with gr.Blocks() as demo:
64
+
65
+ gr.Markdown("# <center>GaussianDreamer: Fast Generation from Text to 3D Gaussians by Bridging 2D and 3D Diffusion Models</center>")
66
+ gr.Markdown("This live demo allows you to generate high-quality 3D content using text prompts. The outputs are 360° rendered 3d video.<br> \
67
+ It is based on Stable Diffusion 2.1-base. Please check out our <strong><a href=https://taoranyi.com/gaussiandreamer/>Project Page</a> / <a href=https://arxiv.org/abs/2310.08529>Paper</a> / <a href=https://github.com/hustvl/GaussianDreamer>Code</a></strong> if you want to learn more about our method!<br> \
68
+ Note that this demo is running on T4, the running time might be longer than the reported 15 minutes (1200 iterations) on RTx 3090.<br> \
69
+ &copy; This Gradio space is developed by Taoran Yi.")
70
  gr.Interface(fn=main, inputs=[gr.Textbox(lines=2, value="A portrait of IRONMAN, white hair, head, photorealistic, 8K, HDR.", label="Your prompt"),
71
  gr.Slider(0, 2000, value=1200, label="Number of iteration"),
72
  gr.Slider(80, 200, value=100, label="CFG"),
 
75
  examples=example_inputs,
76
  cache_examples=True,
77
  concurrency_limit=1)
78
+ demo.launch()