Anne Marthe Sophie Ngo Bibinbe committed
Commit a4c368e · 1 Parent(s): dbff38f
Files changed (3)
  1. .gradio/certificate.pem +31 -0
  2. app.py +233 -233
  3. requirements.txt +1 -0
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
+ -----BEGIN CERTIFICATE-----
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+ -----END CERTIFICATE-----
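Note: the PEM added above appears to be the ISRG Root X1 root certificate (the Let's Encrypt root) that Gradio caches under `.gradio/` when creating share links. A minimal sketch to inspect it, assuming the third-party `cryptography` package is installed (not part of this commit):

```python
# Sketch: decode the committed PEM and print its subject and expiry.
# Assumes `pip install cryptography`; the path matches this commit's layout.
from cryptography import x509

with open(".gradio/certificate.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

print(cert.subject.rfc4514_string())  # expected: CN=ISRG Root X1,O=Internet Security Research Group,C=US
print(cert.not_valid_after)           # validity window of the pinned root
```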
app.py CHANGED
@@ -1,251 +1,251 @@
- import gradio as gr
- import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,  # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2,  # Replace with defaults that work for your model
-                 )
-
-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
-     )

- if __name__ == "__main__":
-     demo.launch(share=True)


- # import gradio as gr
- # import shutil
- # import os
- # import subprocess
- # import sys
- # # Run the .bat file before launching the app
- # """try:
- #     import PromptTrack
- # except ImportError:
- #     print("PromptTrack not found. Installing...")
- #     subprocess.run([sys.executable, "-m", "pip", "install",
- #                     "--index-url", "https://test.pypi.org/simple/",
- #                     "--extra-index-url", "https://pypi.org/simple/",
- #                     "PromptTrack"], check=True)
- #     subprocess.run([sys.executable, "-m", "pip", "install",
- #                     "--no-deps", "bytetracker"], check=True)
- #     import PromptTrack  # Retry import after installation
-
-
- # from PromptTrack import PromptTracker
- # tracker = PromptTracker()"""
- # def process_video(video_path, prompt):
- #     detection_threshold=0.3
- #     track_thresh=0.4
- #     match_thresh=1
- #     max_time_lost=float("inf")
- #     nbr_frames_fixing=800
- #     output_video = video_path.split('mp4')[0]+"_with_id.mp4"  # Placeholder for processed video
- #     output_file = video_path.split('mp4')[0]+"_mot_.json"  # Tracking result
- #     output_file_2 = video_path.split('mp4')[0]+"_object_detection.json"  # detection results
- #     video_file = video_path
- #     """tracker.detect_objects(video_file, prompt=prompt, nms_threshold=0.8, detection_threshold=detection_threshold, detector="OWL-VITV2")
- #     tracker.process_mot(video_file, fixed_parc=True, track_thresh=track_thresh, match_thresh=match_thresh, frame_rate=25, max_time_lost=max_time_lost, nbr_frames_fixing=nbr_frames_fixing)
- #     tracker.read_video_with_mot(video_file, fps=25)
- #     """

- #     output_video = "output.mp4"  # Placeholder for processed video
- #     output_file = "output.txt"  # Placeholder for generated file

- #     # Copy the input video to simulate processing
- #     shutil.copy(video_path.name, output_video)

- #     # Create an output text file with the prompt content
- #     with open(output_file, "w") as f:
- #         f.write(f"User Prompt: {prompt}\n")

- #     return output_video, output_file

- # # Define Gradio interface
- # iface = gr.Interface(
- #     fn=process_video,
- #     inputs=[gr.File(label="Upload Video"), gr.Textbox(placeholder="Enter your prompt")],
- #     outputs=[gr.Video(), gr.File(label="Generated File")],
- #     title="Video Processing App",
- #     description="Upload a video and enter a prompt. The app will return the processed video and a generated file."
- # )

- # # Launch the app
- # '''if __name__ == "__main__":
- #     iface.launch()
- # '''

- # import gradio as gr
- # import shutil
- # import os

- # def process_video(video, prompt):
- #     output_video = "output.mp4"  # Placeholder for processed video
- #     output_file = "output.txt"  # Placeholder for generated file

- #     # Copy the input video to simulate processing
- #     shutil.copy(video.name, output_video)

- #     # Create an output text file with the prompt content
- #     with open(output_file, "w") as f:
- #         f.write(f"User Prompt: {prompt}\n")

- #     return output_video, output_file
-
- # # Define Gradio interface
- # iface = gr.Interface(
- #     fn=process_video,
- #     inputs=[gr.File(label="Upload Video"), gr.Textbox(placeholder="Enter your prompt")],
- #     outputs=[gr.Video(), gr.File(label="Generated File")],
- #     title="Video Processing App",
- #     description="Upload a video and enter a prompt. The app will return the processed video and a generated file."
- # )
-
- # # Launch the app
- # if __name__ == "__main__":
- #     iface.launch(share=True)
+ # import gradio as gr
+ # import numpy as np
+ # import random
+
+ # # import spaces #[uncomment to use ZeroGPU]
+ # from diffusers import DiffusionPipeline
+ # import torch
+
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+ # model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+
+ # if torch.cuda.is_available():
+ #     torch_dtype = torch.float16
+ # else:
+ #     torch_dtype = torch.float32
+
+ # pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+ # pipe = pipe.to(device)
+
+ # MAX_SEED = np.iinfo(np.int32).max
+ # MAX_IMAGE_SIZE = 1024
+
+
+ # # @spaces.GPU #[uncomment to use ZeroGPU]
+ # def infer(
+ #     prompt,
+ #     negative_prompt,
+ #     seed,
+ #     randomize_seed,
+ #     width,
+ #     height,
+ #     guidance_scale,
+ #     num_inference_steps,
+ #     progress=gr.Progress(track_tqdm=True),
+ # ):
+ #     if randomize_seed:
+ #         seed = random.randint(0, MAX_SEED)
+
+ #     generator = torch.Generator().manual_seed(seed)
+
+ #     image = pipe(
+ #         prompt=prompt,
+ #         negative_prompt=negative_prompt,
+ #         guidance_scale=guidance_scale,
+ #         num_inference_steps=num_inference_steps,
+ #         width=width,
+ #         height=height,
+ #         generator=generator,
+ #     ).images[0]
+
+ #     return image, seed
+
+
+ # examples = [
+ #     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+ #     "An astronaut riding a green horse",
+ #     "A delicious ceviche cheesecake slice",
+ # ]
+
+ # css = """
+ # #col-container {
+ #     margin: 0 auto;
+ #     max-width: 640px;
+ # }
+ # """
+
+ # with gr.Blocks(css=css) as demo:
+ #     with gr.Column(elem_id="col-container"):
+ #         gr.Markdown(" # Text-to-Image Gradio Template")
+
+ #         with gr.Row():
+ #             prompt = gr.Text(
+ #                 label="Prompt",
+ #                 show_label=False,
+ #                 max_lines=1,
+ #                 placeholder="Enter your prompt",
+ #                 container=False,
+ #             )
+
+ #             run_button = gr.Button("Run", scale=0, variant="primary")
+
+ #         result = gr.Image(label="Result", show_label=False)
+
+ #         with gr.Accordion("Advanced Settings", open=False):
+ #             negative_prompt = gr.Text(
+ #                 label="Negative prompt",
+ #                 max_lines=1,
+ #                 placeholder="Enter a negative prompt",
+ #                 visible=False,
+ #             )
+
+ #             seed = gr.Slider(
+ #                 label="Seed",
+ #                 minimum=0,
+ #                 maximum=MAX_SEED,
+ #                 step=1,
+ #                 value=0,
+ #             )
+
+ #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+ #             with gr.Row():
+ #                 width = gr.Slider(
+ #                     label="Width",
+ #                     minimum=256,
+ #                     maximum=MAX_IMAGE_SIZE,
+ #                     step=32,
+ #                     value=1024,  # Replace with defaults that work for your model
+ #                 )
+
+ #                 height = gr.Slider(
+ #                     label="Height",
+ #                     minimum=256,
+ #                     maximum=MAX_IMAGE_SIZE,
+ #                     step=32,
+ #                     value=1024,  # Replace with defaults that work for your model
+ #                 )
+
+ #             with gr.Row():
+ #                 guidance_scale = gr.Slider(
+ #                     label="Guidance scale",
+ #                     minimum=0.0,
+ #                     maximum=10.0,
+ #                     step=0.1,
+ #                     value=0.0,  # Replace with defaults that work for your model
+ #                 )
+
+ #                 num_inference_steps = gr.Slider(
+ #                     label="Number of inference steps",
+ #                     minimum=1,
+ #                     maximum=50,
+ #                     step=1,
+ #                     value=2,  # Replace with defaults that work for your model
+ #                 )
+
+ #         gr.Examples(examples=examples, inputs=[prompt])
+ #     gr.on(
+ #         triggers=[run_button.click, prompt.submit],
+ #         fn=infer,
+ #         inputs=[
+ #             prompt,
+ #             negative_prompt,
+ #             seed,
+ #             randomize_seed,
+ #             width,
+ #             height,
+ #             guidance_scale,
+ #             num_inference_steps,
+ #         ],
+ #         outputs=[result, seed],
+ #     )

+ # if __name__ == "__main__":
+ #     demo.launch(share=True)


+ import gradio as gr
+ import shutil
+ import os
+ import subprocess
+ import sys
+ # Run the .bat file before launching the app
+ try:
+     import PromptTrack
+ except ImportError:
+     print("PromptTrack not found. Installing...")
+     subprocess.run([sys.executable, "-m", "pip", "install",
+                     "--index-url", "https://test.pypi.org/simple/",
+                     "--extra-index-url", "https://pypi.org/simple/",
+                     "PromptTrack"], check=True)
+     subprocess.run([sys.executable, "-m", "pip", "install",
+                     "--no-deps", "bytetracker"], check=True)
+     import PromptTrack  # Retry import after installation
+
+
+ from PromptTrack import PromptTracker
+ tracker = PromptTracker()
+ def process_video(video_path, prompt):
+     detection_threshold=0.3
+     track_thresh=0.4
+     match_thresh=1
+     max_time_lost=float("inf")
+     nbr_frames_fixing=800
+     output_video = video_path.split('mp4')[0]+"_with_id.mp4"  # Placeholder for processed video
+     output_file = video_path.split('mp4')[0]+"_mot_.json"  # Tracking result
+     output_file_2 = video_path.split('mp4')[0]+"_object_detection.json"  # detection results
+     video_file = video_path
+     tracker.detect_objects(video_file, prompt=prompt, nms_threshold=0.8, detection_threshold=detection_threshold, detector="OWL-VITV2")
+     tracker.process_mot(video_file, fixed_parc=True, track_thresh=track_thresh, match_thresh=match_thresh, frame_rate=25, max_time_lost=max_time_lost, nbr_frames_fixing=nbr_frames_fixing)
+     tracker.read_video_with_mot(video_file, fps=25)

+     '''output_video = "output.mp4"  # Placeholder for processed video
+     output_file = "output.txt"  # Placeholder for generated file

+     '''
+     # Copy the input video to simulate processing
+     shutil.copy(video_path.name, output_video)

+     # Create an output text file with the prompt content
+     with open(output_file, "w") as f:
+         f.write(f"User Prompt: {prompt}\n")

+     return output_video, output_file

+ # Define Gradio interface
+ iface = gr.Interface(
+     fn=process_video,
+     inputs=[gr.File(label="Upload Video"), gr.Textbox(placeholder="Enter your prompt")],
+     outputs=[gr.Video(), gr.File(label="Generated File")],
+     title="Video Processing App",
+     description="Upload a video and enter a prompt. The app will return the processed video and a generated file."
+ )

+ # Launch the app
+ if __name__ == "__main__":
+     iface.launch()


+ '''
+ import gradio as gr
+ import shutil
+ import os

+ def process_video(video, prompt):
+     output_video = "output.mp4"  # Placeholder for processed video
+     output_file = "output.txt"  # Placeholder for generated file

+     # Copy the input video to simulate processing
+     shutil.copy(video.name, output_video)

+     # Create an output text file with the prompt content
+     with open(output_file, "w") as f:
+         f.write(f"User Prompt: {prompt}\n")

+     return output_video, output_file
+
+ # Define Gradio interface
+ iface = gr.Interface(
+     fn=process_video,
+     inputs=[gr.File(label="Upload Video"), gr.Textbox(placeholder="Enter your prompt")],
+     outputs=[gr.Video(), gr.File(label="Generated File")],
+     title="Video Processing App",
+     description="Upload a video and enter a prompt. The app will return the processed video and a generated file."
+ )
+
+ # Launch the app
+ if __name__ == "__main__":
+     iface.launch(share=True)'''
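Note on the new `process_video`: it derives output names with `video_path.split('mp4')[0]`, which leaves a trailing dot (`clip.mp4` becomes `clip._with_id.mp4`) and misbehaves if "mp4" occurs elsewhere in the path; it also treats `video_path` as a string here but as a file object at `video_path.name`. A hedged sketch of a more robust derivation, not part of the commit (the helper name `derive_output_paths` is illustrative):

```python
import os

def derive_output_paths(video_path: str):
    # os.path.splitext strips the extension exactly once, so
    # "clip.mp4" -> "clip_with_id.mp4" instead of "clip._with_id.mp4".
    stem, _ = os.path.splitext(video_path)
    return (
        stem + "_with_id.mp4",            # processed video
        stem + "_mot_.json",              # tracking result
        stem + "_object_detection.json",  # detection results
    )
```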
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 
+ gradio>=4.0.0
  accelerate
  diffusers
  invisible_watermark
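The only change to requirements.txt is the new `gradio>=4.0.0` line. A small sanity check (a sketch, assuming Python 3.8+ for `importlib.metadata`) that an environment satisfies the pin before launching the app:

```python
# Sketch: verify the installed gradio meets the pin added in this commit.
from importlib.metadata import version

major = int(version("gradio").split(".")[0])
assert major >= 4, "requirements.txt pins gradio>=4.0.0"
```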