thewhole committed on
Commit c64ae0b · 1 Parent(s): df1c43e

Update app.py

Files changed (1)
  1. app.py +4 -13
app.py CHANGED
@@ -30,20 +30,11 @@ example_outputs_1 = [
     gr.Video(value=os.path.join(os.path.dirname(__file__), 'example/flamethrower,_with_fire,_scifi,_cyberpunk,_photorealistic,_8K,_HD.mp4'), autoplay=True)
 ]
 
-from shap_e.diffusion.sample import sample_latents
-from shap_e.diffusion.gaussian_diffusion import diffusion_from_config as diffusion_from_config_shape
-from shap_e.models.download import load_model, load_config
-from shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget
-from shap_e.util.notebooks import decode_latent_mesh
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-xm = load_model('transmitter', device=device)
-model = load_model('text300M', device=device)
-# model.load_state_dict(torch.load('./load/shapE_finetuned_with_330kdata.pth', map_location=device)['model_state_dict'])
-diffusion = diffusion_from_config_shape(load_config('diffusion'))
 
-del xm
-del model
-del diffusion
+subprocess.run([
+    f'python shape.py'],
+    shell=True)
+
 def main(prompt, iteration,CFG, seed):
     if [prompt] in example_inputs:
         return example_outputs_1[example_inputs.index([prompt])]
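With this commit, app.py no longer imports and initializes the Shap-E models at module import time; instead it shells out to a separate shape.py script via subprocess.run. Below is a minimal sketch of what such a standalone shape.py might look like, reconstructed from the imports and model-loading calls removed above. The command-line prompt handling, sampling parameters, and output path are illustrative assumptions, not the repository's actual script.

# shape.py -- hypothetical standalone Shap-E runner (sketch, not the repo's actual script).
# It mirrors the setup this commit removes from app.py: load the 'transmitter' and
# 'text300M' models, build the diffusion sampler, sample a latent for a text prompt,
# and decode it to a mesh.
import sys
import torch

from shap_e.diffusion.sample import sample_latents
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
from shap_e.models.download import load_model, load_config
from shap_e.util.notebooks import decode_latent_mesh

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
xm = load_model('transmitter', device=device)        # latent -> 3D decoder
model = load_model('text300M', device=device)        # text-conditioned prior
diffusion = diffusion_from_config(load_config('diffusion'))

# Assumed CLI interface: prompt as the first argument, with a fallback default.
prompt = sys.argv[1] if len(sys.argv) > 1 else 'a shark'

# Sampling parameters follow the standard Shap-E text-to-3D example; the actual
# script may use different values (e.g. the CFG/iteration inputs from the Gradio UI).
latents = sample_latents(
    batch_size=1,
    model=model,
    diffusion=diffusion,
    guidance_scale=15.0,
    model_kwargs=dict(texts=[prompt]),
    progress=True,
    clip_denoised=True,
    use_fp16=True,
    use_karras=True,
    karras_steps=64,
    sigma_min=1e-3,
    sigma_max=160,
    s_churn=0,
)

# Decode the sampled latent to a triangle mesh and write it out (output path assumed).
mesh = decode_latent_mesh(xm, latents[0]).tri_mesh()
with open('shape_output.obj', 'w') as f:
    mesh.write_obj(f)

Running the Shap-E stage in a child process keeps the GPU memory it allocates out of the Gradio app's process, since everything is released when the subprocess exits; that is presumably why the old in-process del xm / del model / del diffusion workaround was dropped in this commit.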