Raumkommander committed
Commit 80a5ad2 · Parent: c8ac78e

initial deployment1

Files changed (1): app.py (+3 -2)
app.py CHANGED
@@ -3,7 +3,7 @@ import cv2
 import torch
 import numpy as np
 from diffusers import StableDiffusionPipeline
-from transformers import AutoProcessor, AutoModel
+from transformers import AutoProcessor, AutoModel, AutoTokenizer
 from PIL import Image
 
 # Load the Real-Time Latent Consistency Model
@@ -13,6 +13,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 # Load the model (optimized for inference)
 model_id = "https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model"
 
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 
 realtime_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
 realtime_pipe.to("cuda")  # Use GPU for faster inference
@@ -52,4 +53,4 @@ with gr.Blocks() as demo:
 prompt_input = gr.Textbox(label="Real-Time Latent Consistency Model Prompt", value="A futuristic landscape")
 webcam_feed.change(fn=video_stream, inputs=[prompt_input], outputs=[processed_image, canvas])
 
-demo.launch()
+demo.launch(share=True)
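
A note on the code this commit touches: model_id is a Space URL, but both AutoTokenizer.from_pretrained and StableDiffusionPipeline.from_pretrained expect a Hub repo id (or a local path), so the added tokenizer line and the pipeline load will fail at runtime; the pipeline is also moved to "cuda" unconditionally even though a device fallback is computed earlier. A minimal sketch of a loader that would run, assuming a real latent-consistency checkpoint such as SimianLuo/LCM_Dreamshaper_v7 (an illustrative choice, not part of this commit):

import torch
from diffusers import DiffusionPipeline

# Assumed example checkpoint: "SimianLuo/LCM_Dreamshaper_v7" is a published
# latent-consistency model on the Hub; the Space URL above is not a loadable id.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe.to(device)  # honor the CPU fallback instead of hardcoding "cuda"

# Latent consistency models converge in very few steps; 4-8 is typical.
image = pipe("A futuristic landscape", num_inference_steps=4, guidance_scale=8.0).images[0]

Note also that diffusers pipelines bundle their own tokenizer (available as pipe.tokenizer), so the separate AutoTokenizer load this commit adds would be redundant even with a valid repo id.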