samarth-ht committed
Commit f0cfabe · 1 Parent(s): 20f3332

files added

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.

Files changed (50)
  1. .gitignore +45 -0
  2. LICENSE +201 -0
  3. README.md +170 -12
  4. app.py +161 -1
  5. cog.yaml +44 -0
  6. configs/audio.yaml +23 -0
  7. configs/scheduler_config.json +13 -0
  8. configs/syncnet/syncnet_16_latent.yaml +46 -0
  9. configs/syncnet/syncnet_16_pixel.yaml +45 -0
  10. configs/syncnet/syncnet_25_pixel.yaml +45 -0
  11. configs/unet/first_stage.yaml +103 -0
  12. configs/unet/second_stage.yaml +103 -0
  13. data/syncnet_dataset.py +153 -0
  14. data/unet_dataset.py +164 -0
  15. data_processing_pipeline.sh +9 -0
  16. eval/detectors/README.md +3 -0
  17. eval/detectors/__init__.py +1 -0
  18. eval/detectors/s3fd/__init__.py +61 -0
  19. eval/detectors/s3fd/box_utils.py +221 -0
  20. eval/detectors/s3fd/nets.py +174 -0
  21. eval/draw_syncnet_lines.py +70 -0
  22. eval/eval_fvd.py +96 -0
  23. eval/eval_sync_conf.py +77 -0
  24. eval/eval_sync_conf.sh +2 -0
  25. eval/eval_syncnet_acc.py +118 -0
  26. eval/eval_syncnet_acc.sh +3 -0
  27. eval/fvd.py +56 -0
  28. eval/hyper_iqa.py +343 -0
  29. eval/inference_videos.py +37 -0
  30. eval/syncnet/__init__.py +1 -0
  31. eval/syncnet/syncnet.py +113 -0
  32. eval/syncnet/syncnet_eval.py +220 -0
  33. eval/syncnet_detect.py +251 -0
  34. inference.sh +9 -0
  35. pipelines/lipsync_pipeline.py +470 -0
  36. predict.py +60 -0
  37. preprocess/affine_transform.py +137 -0
  38. preprocess/data_processing_pipeline.py +85 -0
  39. preprocess/detect_shot.py +62 -0
  40. preprocess/filter_high_resolution.py +112 -0
  41. preprocess/filter_visual_quality.py +127 -0
  42. preprocess/remove_broken_videos.py +43 -0
  43. preprocess/remove_incorrect_affined.py +81 -0
  44. preprocess/resample_fps_hz.py +70 -0
  45. preprocess/segment_videos.py +62 -0
  46. preprocess/sync_av.py +113 -0
  47. requirements.txt +30 -0
  48. scripts/inference.py +103 -0
  49. scripts/train_syncnet.py +336 -0
  50. scripts/train_unet.py +510 -0
.gitignore ADDED
@@ -0,0 +1,45 @@
+ # PyCharm files
+ .idea/
+
+ # macOS dir files
+ .DS_Store
+
+ # VS Code configuration dir
+ .vscode/
+
+ # Jupyter Notebook cache files
+ .ipynb_checkpoints/
+ *.ipynb
+
+ # Python cache files
+ __pycache__/
+
+ # folders
+ wandb/
+ *debug*
+ /debug
+ /output
+ /validation
+ /test
+ /models/
+ /venv/
+ /detect_results/
+ /temp
+
+ # checkpoint files
+ *.safetensors
+ *.ckpt
+ *.pt
+
+ # data files
+ *.mp4
+ *.avi
+ *.wav
+ *.png
+ *.jpg
+ *.jpeg
+ *.csv
+
+ !/latentsync/utils/mask.png
+ /checkpoints/
+ !/assets/*
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
README.md CHANGED
@@ -1,12 +1,170 @@
- ---
- title: LatentSync
- emoji: 🌍
- colorFrom: green
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.12.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # LatentSync: Audio Conditioned Latent Diffusion Models for Lip Sync
+
+ <div align="center">
+
+ [![arXiv](https://img.shields.io/badge/arXiv_paper-2412.09262-b31b1b)](https://arxiv.org/abs/2412.09262)
+ [![HuggingFace Space](https://img.shields.io/badge/%F0%9F%A4%97%20space-HuggingFace-yellow)](https://huggingface.co/spaces/fffiloni/LatentSync)
+ <a href="https://replicate.com/lucataco/latentsync"><img src="https://replicate.com/lucataco/latentsync/badge" alt="Replicate"></a>
+
+ </div>
+
+ ## 📖 Abstract
+
+ We present *LatentSync*, an end-to-end lip sync framework based on audio-conditioned latent diffusion models without any intermediate motion representation, diverging from previous diffusion-based lip sync methods that rely on pixel-space diffusion or two-stage generation. Our framework can leverage the powerful capabilities of Stable Diffusion to directly model complex audio-visual correlations. Additionally, we found that diffusion-based lip sync methods exhibit inferior temporal consistency due to inconsistencies in the diffusion process across frames. We propose *Temporal REPresentation Alignment (TREPA)* to enhance temporal consistency while preserving lip-sync accuracy. TREPA uses temporal representations extracted by large-scale self-supervised video models to align the generated frames with the ground-truth frames.
+
+ ## 🏗️ Framework
+
+ <p align="center">
+ <img src="assets/framework.png" width=100%>
+ </p>
+
+ LatentSync uses [Whisper](https://github.com/openai/whisper) to convert mel spectrograms into audio embeddings, which are then integrated into the U-Net via cross-attention layers. The reference and masked frames are channel-wise concatenated with the noised latents as the input to the U-Net. During training, we use a one-step method to obtain estimated clean latents from the predicted noise, which are then decoded to obtain the estimated clean frames. The TREPA, [LPIPS](https://arxiv.org/abs/1801.03924) and [SyncNet](https://www.robots.ox.ac.uk/~vgg/publications/2016/Chung16a/chung16a.pdf) losses are added in the pixel space.
+
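Below is a minimal sketch (not the repository's actual code) of how the conditioning inputs described above could be assembled. The 4 + 1 + 4 + 4 channel breakdown is an inference from `in_channels: 13` in `configs/unet/*.yaml`, assuming 4-channel VAE latents; all tensor names are illustrative.

```python
import torch

b, f, h, w = 2, 16, 32, 32                      # batch, frames, latent height/width
noisy_latents  = torch.randn(b, 4, f, h, w)     # noised video latents
mask           = torch.ones(b, 1, f, h, w)      # downsampled mouth-region mask
masked_latents = torch.randn(b, 4, f, h, w)     # latents of the masked frames
ref_latents    = torch.randn(b, 4, f, h, w)     # latents of the reference frames

# Channel-wise concatenation -> 13 input channels, matching the U-Net config
unet_input = torch.cat([noisy_latents, mask, masked_latents, ref_latents], dim=1)
print(unet_input.shape)  # torch.Size([2, 13, 16, 32, 32])

# Whisper audio embeddings enter separately through cross-attention
# (cross_attention_dim: 384 in the config); the sequence length here is illustrative.
audio_embeds = torch.randn(b * f, 50, 384)
```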
+ ## 🎬 Demo
+
+ <table class="center">
+ <tr style="font-weight: bolder;text-align:center;">
+ <td width="50%"><b>Original video</b></td>
+ <td width="50%"><b>Lip-synced video</b></td>
+ </tr>
+ <tr>
+ <td>
+ <video src=https://github.com/user-attachments/assets/ff3a84da-dc9b-498a-950f-5c54f58dd5c5 controls preload></video>
+ </td>
+ <td>
+ <video src=https://github.com/user-attachments/assets/150e00fd-381e-4421-a478-a9ea3d1212a8 controls preload></video>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <video src=https://github.com/user-attachments/assets/32c830a9-4d7d-4044-9b33-b184d8e11010 controls preload></video>
+ </td>
+ <td>
+ <video src=https://github.com/user-attachments/assets/84e4fe9d-b108-44a4-8712-13a012348145 controls preload></video>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <video src=https://github.com/user-attachments/assets/7510a448-255a-44ee-b093-a1b98bd3961d controls preload></video>
+ </td>
+ <td>
+ <video src=https://github.com/user-attachments/assets/6150c453-c559-4ae0-bb00-c565f135ff41 controls preload></video>
+ </td>
+ </tr>
+ <tr>
+ <td width=300px>
+ <video src=https://github.com/user-attachments/assets/0f7f9845-68b2-4165-bd08-c7bbe01a0e52 controls preload></video>
+ </td>
+ <td width=300px>
+ <video src=https://github.com/user-attachments/assets/c34fe89d-0c09-4de3-8601-3d01229a69e3 controls preload></video>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <video src=https://github.com/user-attachments/assets/7ce04d50-d39f-4154-932a-ec3a590a8f64 controls preload></video>
+ </td>
+ <td>
+ <video src=https://github.com/user-attachments/assets/70bde520-42fa-4a0e-b66c-d3040ae5e065 controls preload></video>
+ </td>
+ </tr>
+ </table>
+
+ (Photorealistic videos are filmed by contracted models, and anime videos are from [VASA-1](https://www.microsoft.com/en-us/research/project/vasa-1/) and [EMO](https://humanaigc.github.io/emote-portrait-alive/))
+
+ ## 📑 Open-source Plan
+
+ - [x] Inference code and checkpoints
+ - [x] Data processing pipeline
+ - [x] Training code
+
+ ## 🔧 Setting up the Environment
+
+ Install the required packages and download the checkpoints via:
+
+ ```bash
+ source setup_env.sh
+ ```
+
+ If the download is successful, the checkpoints should appear as follows:
+
+ ```
+ ./checkpoints/
+ |-- latentsync_unet.pt
+ |-- latentsync_syncnet.pt
+ |-- whisper
+ |   `-- tiny.pt
+ |-- auxiliary
+ |   |-- 2DFAN4-cd938726ad.zip
+ |   |-- i3d_torchscript.pt
+ |   |-- koniq_pretrained.pkl
+ |   |-- s3fd-619a316812.pth
+ |   |-- sfd_face.pth
+ |   |-- syncnet_v2.model
+ |   |-- vgg16-397923af.pth
+ |   `-- vit_g_hybrid_pt_1200e_ssv2_ft.pth
+ ```
+
+ These include all the checkpoints required for LatentSync training and inference. If you just want to try inference, you only need to download `latentsync_unet.pt` and `tiny.pt` from our [HuggingFace repo](https://huggingface.co/chunyu-li/LatentSync).
+
+ ## 🚀 Inference
+
+ ### 1. Gradio App
+
+ Run the Gradio app for inference, which requires about 6.5 GB of GPU memory.
+
+ ```bash
+ python gradio_app.py
+ ```
+
+ ### 2. Command Line Interface
+
+ Run the script for inference, which requires about 6.5 GB of GPU memory.
+
+ ```bash
+ ./inference.sh
+ ```
+
+ You can increase `guidance_scale` to 1.5 to improve lip-sync accuracy.
+
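If you prefer to run inference programmatically rather than through `./inference.sh`, the sketch below mirrors what `app.py` in this commit does; it is a hedged example, not an official API, and assumes the default checkpoint and config paths shown above, with an illustrative output path.

```python
import argparse
from omegaconf import OmegaConf
from scripts.inference import main

config = OmegaConf.load("configs/unet/second_stage.yaml")
config["run"].update({"guidance_scale": 1.5, "inference_steps": 20})

# Same argument names that app.py's create_args() builds via argparse
args = argparse.Namespace(
    inference_ckpt_path="checkpoints/latentsync_unet.pt",
    video_path="assets/demo1_video.mp4",
    audio_path="assets/demo1_audio.wav",
    video_out_path="temp/demo1_out.mp4",   # illustrative output location
    guidance_scale=1.5,
    seed=1247,
)
main(config=config, args=args)
```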
+ ## 🔄 Data Processing Pipeline
+
+ The complete data processing pipeline includes the following steps:
+
+ 1. Remove the broken video files.
+ 2. Resample the video FPS to 25, and resample the audio to 16000 Hz.
+ 3. Detect scene boundaries via [PySceneDetect](https://github.com/Breakthrough/PySceneDetect).
+ 4. Split each video into 5-10 second segments.
+ 5. Remove videos where the face is smaller than 256 $\times$ 256, as well as videos with more than one face.
+ 6. Affine transform the faces according to the landmarks detected by [face-alignment](https://github.com/1adrianb/face-alignment), then resize to 256 $\times$ 256.
+ 7. Remove videos with a [sync confidence score](https://www.robots.ox.ac.uk/~vgg/publications/2016/Chung16a/chung16a.pdf) lower than 3, and adjust the audio-visual offset to 0.
+ 8. Calculate the [hyperIQA](https://openaccess.thecvf.com/content_CVPR_2020/papers/Su_Blindly_Assess_Image_Quality_in_the_Wild_Guided_by_a_CVPR_2020_paper.pdf) score, and remove videos with scores lower than 40.
+
+ Run the script to execute the data processing pipeline:
+
+ ```bash
+ ./data_processing_pipeline.sh
+ ```
+
+ You can change the `input_dir` parameter in the script to specify the data directory to be processed. The processed data will be saved in the `high_visual_quality` directory. Each step writes its output to a new directory, so the entire pipeline does not need to be redone if the process is interrupted by an unexpected error. (A rough sketch of steps 2-3 is shown below.)
+
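A rough sketch of steps 2-3 (not the repository's own preprocess scripts, which live under `preprocess/`), using ffmpeg-python and PySceneDetect, both of which appear in the dependency list in cog.yaml; paths are illustrative.

```python
import ffmpeg
from scenedetect import ContentDetector, detect

# Step 2: resample the video to 25 fps and the audio to 16 kHz
(
    ffmpeg.input("raw/clip.mp4")
    .output("resampled/clip.mp4", r=25, ar=16000)
    .overwrite_output()
    .run(quiet=True)
)

# Step 3: detect shot boundaries with PySceneDetect's content detector
scenes = detect("resampled/clip.mp4", ContentDetector())
for start, end in scenes:
    print("shot:", start.get_timecode(), "->", end.get_timecode())
```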
+
+ ## 🏋️‍♂️ Training U-Net
+
+ Before training, you must process the data as described above and download all the checkpoints. We have released a pretrained SyncNet with 94% accuracy on the VoxCeleb2 dataset for supervising U-Net training. Note that this SyncNet was trained on affine-transformed videos, so when using or evaluating it, you need to apply the affine transformation to the video first (the affine-transformation code is included in the data processing pipeline).
+
+ If all the preparations are complete, you can train the U-Net with the following script:
+
+ ```bash
+ ./train_unet.sh
+ ```
+
+ You should change the parameters in the U-Net config file to specify the data directory, checkpoint save path, and other training hyperparameters.
+
+ ## 🏋️‍♂️ Training SyncNet
+
+ If you want to train SyncNet on your own datasets, you can run the following script. The data processing pipeline for SyncNet is the same as for the U-Net.
+
+ ```bash
+ ./train_syncnet.sh
+ ```
+
+ Every `validation_steps` training steps, the loss charts are saved in `train_output_dir`. They contain both the training and validation loss.
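For orientation, SyncNet supervision of the kind mentioned above is conventionally a cosine-similarity/BCE objective in the style of Chung & Zisserman's SyncNet. The sketch below is a generic version of that objective, not the repository's exact loss (which lives in scripts/train_unet.py and scripts/train_syncnet.py); embedding sizes and labels are illustrative.

```python
import torch
import torch.nn.functional as F

def cosine_bce_sync_loss(audio_emb: torch.Tensor, visual_emb: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """audio_emb, visual_emb: (B, D) embeddings; y: (B, 1), 1 = in sync, 0 = off sync."""
    sim = F.cosine_similarity(audio_emb, visual_emb, dim=1, eps=1e-8)
    prob = (sim + 1) / 2                     # map cosine range [-1, 1] to [0, 1]
    return F.binary_cross_entropy(prob.unsqueeze(1), y)

# Toy usage with random embeddings and labels like those produced by the SyncNet dataset
audio_emb, visual_emb = torch.randn(8, 1024), torch.randn(8, 1024)
y = torch.randint(0, 2, (8, 1)).float()
print(cosine_bce_sync_loss(audio_emb, visual_emb, y))
```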
app.py CHANGED
@@ -1 +1,161 @@
- print("hello")
+ import gradio as gr
+ from pathlib import Path
+ from scripts.inference import main
+ from omegaconf import OmegaConf
+ import argparse
+ from datetime import datetime
+
+ CONFIG_PATH = Path("configs/unet/second_stage.yaml")
+ CHECKPOINT_PATH = Path("checkpoints/latentsync_unet.pt")
+
+
+ def process_video(
+     video_path,
+     audio_path,
+     guidance_scale,
+     inference_steps,
+     seed,
+ ):
+     # Create the temp directory if it doesn't exist
+     output_dir = Path("./temp")
+     output_dir.mkdir(parents=True, exist_ok=True)
+
+     # Convert paths to absolute Path objects and normalize them
+     video_file_path = Path(video_path)
+     video_path = video_file_path.absolute().as_posix()
+     audio_path = Path(audio_path).absolute().as_posix()
+
+     current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
+     # Set the output path for the processed video
+     output_path = str(
+         output_dir / f"{video_file_path.stem}_{current_time}.mp4"
+     )  # Change the filename as needed
+
+     config = OmegaConf.load(CONFIG_PATH)
+
+     config["run"].update(
+         {
+             "guidance_scale": guidance_scale,
+             "inference_steps": inference_steps,
+         }
+     )
+
+     # Parse the arguments
+     args = create_args(video_path, audio_path, output_path, guidance_scale, seed)
+
+     try:
+         result = main(
+             config=config,
+             args=args,
+         )
+         print("Processing completed successfully.")
+         return output_path  # Ensure the output path is returned
+     except Exception as e:
+         print(f"Error during processing: {str(e)}")
+         raise gr.Error(f"Error during processing: {str(e)}")
+
+
+ def create_args(
+     video_path: str, audio_path: str, output_path: str, guidance_scale: float, seed: int
+ ) -> argparse.Namespace:
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--inference_ckpt_path", type=str, required=True)
+     parser.add_argument("--video_path", type=str, required=True)
+     parser.add_argument("--audio_path", type=str, required=True)
+     parser.add_argument("--video_out_path", type=str, required=True)
+     parser.add_argument("--guidance_scale", type=float, default=1.0)
+     parser.add_argument("--seed", type=int, default=1247)
+
+     return parser.parse_args(
+         [
+             "--inference_ckpt_path",
+             CHECKPOINT_PATH.absolute().as_posix(),
+             "--video_path",
+             video_path,
+             "--audio_path",
+             audio_path,
+             "--video_out_path",
+             output_path,
+             "--guidance_scale",
+             str(guidance_scale),
+             "--seed",
+             str(seed),
+         ]
+     )
+
+
+ # Create Gradio interface
+ with gr.Blocks(title="LatentSync Video Processing") as demo:
+     gr.Markdown(
+         """
+         # LatentSync: Audio Conditioned Latent Diffusion Models for Lip Sync
+         Upload a video and an audio file to process them with the LatentSync model.
+
+         <div align="center">
+         <strong>Chunyu Li1,2 Chao Zhang1 Weikai Xu1 Jinghui Xie1,† Weiguo Feng1
+         Bingyue Peng1 Weiwei Xing2,†</strong>
+         </div>
+
+         <div align="center">
+         <strong>1ByteDance 2Beijing Jiaotong University</strong>
+         </div>
+
+         <div style="display:flex;justify-content:center;column-gap:4px;">
+         <a href="https://github.com/bytedance/LatentSync">
+         <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
+         </a>
+         <a href="https://arxiv.org/pdf/2412.09262">
+         <img src='https://img.shields.io/badge/ArXiv-Paper-red'>
+         </a>
+         </div>
+         """
+     )
+
+     with gr.Row():
+         with gr.Column():
+             video_input = gr.Video(label="Input Video")
+             audio_input = gr.Audio(label="Input Audio", type="filepath")
+
+             with gr.Row():
+                 guidance_scale = gr.Slider(
+                     minimum=0.1,
+                     maximum=3.0,
+                     value=1.0,
+                     step=0.1,
+                     label="Guidance Scale",
+                 )
+                 inference_steps = gr.Slider(
+                     minimum=1, maximum=50, value=20, step=1, label="Inference Steps"
+                 )
+
+             with gr.Row():
+                 seed = gr.Number(value=1247, label="Random Seed", precision=0)
+
+             process_btn = gr.Button("Process Video")
+
+         with gr.Column():
+             video_output = gr.Video(label="Output Video")
+
+     gr.Examples(
+         examples=[
+             ["assets/demo1_video.mp4", "assets/demo1_audio.wav"],
+             ["assets/demo2_video.mp4", "assets/demo2_audio.wav"],
+             ["assets/demo3_video.mp4", "assets/demo3_audio.wav"],
+         ],
+         inputs=[video_input, audio_input],
+     )
+
+     process_btn.click(
+         fn=process_video,
+         inputs=[
+             video_input,
+             audio_input,
+             guidance_scale,
+             inference_steps,
+             seed,
+         ],
+         outputs=video_output,
+     )
+
+ if __name__ == "__main__":
+     demo.launch(inbrowser=True, share=True)
cog.yaml ADDED
@@ -0,0 +1,44 @@
+ # Configuration for Cog ⚙️
+ # Reference: https://cog.run/yaml
+
+ build:
+   gpu: true
+   cuda: "12.1"
+   system_packages:
+     - "ffmpeg"
+     - "libgl1"
+   python_version: "3.10.13"
+   python_packages:
+     - "torch==2.2.2"
+     - "torchvision"
+     - "triton==2.2.0"
+     - "diffusers==0.11.1"
+     - "transformers==4.38.0"
+     - "huggingface-hub==0.25.2"
+     - "imageio==2.27.0"
+     - "decord==0.6.0"
+     - "accelerate==0.26.1"
+     - "einops==0.7.0"
+     - "omegaconf==2.3.0"
+     - "safetensors==0.4.2"
+     - "opencv-python==4.9.0.80"
+     - "mediapipe==0.10.11"
+     - "av==11.0.0"
+     - "torch-fidelity==0.3.0"
+     - "torchmetrics==1.3.1"
+     - "python_speech_features==0.6"
+     - "librosa==0.10.1"
+     - "scenedetect==0.6.1"
+     - "ffmpeg-python==0.2.0"
+     - "lpips==0.1.4"
+     - "face-alignment==1.4.1"
+     - "ninja==1.11.1.1"
+     - "pandas==2.0.3"
+     - "numpy==1.24.4"
+     - "xformers==0.0.26"
+
+   run:
+     - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.8.2/pget_linux_x86_64" && chmod +x /usr/local/bin/pget
+
+ # predict.py defines how predictions are run on your model
+ predict: "predict.py:Predictor"
configs/audio.yaml ADDED
@@ -0,0 +1,23 @@
+ audio:
+   num_mels: 80 # Number of mel-spectrogram channels and local conditioning dimensionality
+   rescale: true # Whether to rescale audio prior to preprocessing
+   rescaling_max: 0.9 # Rescaling value
+   use_lws:
+     false # Use LWS (https://github.com/Jonathan-LeRoux/lws) for STFT and phase reconstruction
+     # It's preferred to set this to true for use with https://github.com/r9y9/wavenet_vocoder
+     # Does not work if n_fft is not a multiple of hop_size!
+   n_fft: 800 # Extra window size is filled with 0 paddings to match this parameter
+   hop_size: 200 # For 16000 Hz, 200 = 12.5 ms (0.0125 * sample_rate)
+   win_size: 800 # For 16000 Hz, 800 = 50 ms (if None, win_size = n_fft) (0.05 * sample_rate)
+   sample_rate: 16000 # 16000 Hz (corresponding to LibriSpeech) (sox --i <filename>)
+   frame_shift_ms: null
+   signal_normalization: true
+   allow_clipping_in_normalization: true
+   symmetric_mels: true
+   max_abs_value: 4.0
+   preemphasize: true # whether to apply the pre-emphasis filter
+   preemphasis: 0.97 # filter coefficient
+   min_level_db: -100
+   ref_level_db: 20
+   fmin: 55
+   fmax: 7600
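The repository's own mel-spectrogram helper (in latentsync/utils/audio.py, not among the 50 files shown in this view) consumes these settings; the snippet below is only an approximate librosa equivalent of the core parameters, without the pre-emphasis and dB normalisation steps configured above.

```python
import librosa

# Load audio at the configured 16 kHz sample rate
y, sr = librosa.load("assets/demo1_audio.wav", sr=16000)

mel = librosa.feature.melspectrogram(
    y=y, sr=sr,
    n_fft=800, hop_length=200, win_length=800,   # n_fft / hop_size / win_size
    n_mels=80, fmin=55, fmax=7600,               # num_mels / fmin / fmax
)
log_mel = librosa.power_to_db(mel)               # the repo applies its own dB scaling/normalisation
print(log_mel.shape)                             # (80, T), ~80 mel frames per second of audio
```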
configs/scheduler_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_class_name": "DDIMScheduler",
+   "_diffusers_version": "0.6.0.dev0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "num_train_timesteps": 1000,
+   "set_alpha_to_one": false,
+   "steps_offset": 1,
+   "trained_betas": null,
+   "skip_prk_steps": true
+ }
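For orientation, this JSON corresponds to a diffusers DDIMScheduler (diffusers==0.11.1 is pinned in cog.yaml). A small, hedged sketch of constructing the scheduler from it, dropping the bookkeeping keys and the PNDM-only `skip_prk_steps` flag:

```python
import json
from diffusers import DDIMScheduler

with open("configs/scheduler_config.json") as f:
    cfg = json.load(f)

# Keep only plain scheduler arguments
kwargs = {k: v for k, v in cfg.items() if not k.startswith("_") and k != "skip_prk_steps"}
scheduler = DDIMScheduler(**kwargs)
print(scheduler.config.beta_schedule)  # "scaled_linear"
```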
configs/syncnet/syncnet_16_latent.yaml ADDED
@@ -0,0 +1,46 @@
+ model:
+   audio_encoder: # input (1, 80, 52)
+     in_channels: 1
+     block_out_channels: [32, 64, 128, 256, 512, 1024]
+     downsample_factors: [[2, 1], 2, 2, 2, 2, [2, 3]]
+     attn_blocks: [0, 0, 0, 0, 0, 0]
+     dropout: 0.0
+   visual_encoder: # input (64, 32, 32)
+     in_channels: 64
+     block_out_channels: [64, 128, 256, 256, 512, 1024]
+     downsample_factors: [2, 2, 2, 1, 2, 2]
+     attn_blocks: [0, 0, 0, 0, 0, 0]
+     dropout: 0.0
+
+ ckpt:
+   resume_ckpt_path: ""
+   inference_ckpt_path: ""
+   save_ckpt_steps: 2500
+
+ data:
+   train_output_dir: output/syncnet
+   num_val_samples: 1200
+   batch_size: 120 # 40
+   num_workers: 11 # 11
+   latent_space: true
+   num_frames: 16
+   resolution: 256
+   train_fileslist: ""
+   train_data_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality/train
+   val_fileslist: ""
+   val_data_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality/val
+   audio_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+   lower_half: false
+   pretrained_audio_model_path: facebook/wav2vec2-large-xlsr-53
+   audio_sample_rate: 16000
+   video_fps: 25
+
+ optimizer:
+   lr: 1e-5
+   max_grad_norm: 1.0
+
+ run:
+   max_train_steps: 10000000
+   validation_steps: 2500
+   mixed_precision_training: true
+   seed: 42
configs/syncnet/syncnet_16_pixel.yaml ADDED
@@ -0,0 +1,45 @@
+ model:
+   audio_encoder: # input (1, 80, 52)
+     in_channels: 1
+     block_out_channels: [32, 64, 128, 256, 512, 1024, 2048]
+     downsample_factors: [[2, 1], 2, 2, 1, 2, 2, [2, 3]]
+     attn_blocks: [0, 0, 0, 0, 0, 0, 0]
+     dropout: 0.0
+   visual_encoder: # input (48, 128, 256)
+     in_channels: 48
+     block_out_channels: [64, 128, 256, 256, 512, 1024, 2048, 2048]
+     downsample_factors: [[1, 2], 2, 2, 2, 2, 2, 2, 2]
+     attn_blocks: [0, 0, 0, 0, 0, 0, 0, 0]
+     dropout: 0.0
+
+ ckpt:
+   resume_ckpt_path: ""
+   inference_ckpt_path: checkpoints/latentsync_syncnet.pt
+   save_ckpt_steps: 2500
+
+ data:
+   train_output_dir: debug/syncnet
+   num_val_samples: 2048
+   batch_size: 128 # 128
+   num_workers: 11 # 11
+   latent_space: false
+   num_frames: 16
+   resolution: 256
+   train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/all_data_v6.txt
+   train_data_dir: ""
+   val_fileslist: ""
+   val_data_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality/val
+   audio_mel_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+   lower_half: true
+   audio_sample_rate: 16000
+   video_fps: 25
+
+ optimizer:
+   lr: 1e-5
+   max_grad_norm: 1.0
+
+ run:
+   max_train_steps: 10000000
+   validation_steps: 2500
+   mixed_precision_training: true
+   seed: 42
configs/syncnet/syncnet_25_pixel.yaml ADDED
@@ -0,0 +1,45 @@
+ model:
+   audio_encoder: # input (1, 80, 80)
+     in_channels: 1
+     block_out_channels: [64, 128, 256, 256, 512, 1024]
+     downsample_factors: [2, 2, 2, 2, 2, 2]
+     dropout: 0.0
+   visual_encoder: # input (75, 128, 256)
+     in_channels: 75
+     block_out_channels: [128, 128, 256, 256, 512, 512, 1024, 1024]
+     downsample_factors: [[1, 2], 2, 2, 2, 2, 2, 2, 2]
+     dropout: 0.0
+
+ ckpt:
+   resume_ckpt_path: ""
+   inference_ckpt_path: ""
+   save_ckpt_steps: 2500
+
+ data:
+   train_output_dir: debug/syncnet
+   num_val_samples: 2048
+   batch_size: 64 # 64
+   num_workers: 11 # 11
+   latent_space: false
+   num_frames: 25
+   resolution: 256
+   train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/hdtf_vox_avatars_ads_affine.txt
+   # /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/hdtf_voxceleb_avatars_affine.txt
+   train_data_dir: ""
+   val_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/vox_affine_val.txt
+   # /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/voxceleb_val.txt
+   val_data_dir: ""
+   audio_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel
+   lower_half: true
+   pretrained_audio_model_path: facebook/wav2vec2-large-xlsr-53
+   audio_sample_rate: 16000
+   video_fps: 25
+
+ optimizer:
+   lr: 1e-5
+   max_grad_norm: 1.0
+
+ run:
+   max_train_steps: 10000000
+   mixed_precision_training: true
+   seed: 42
configs/unet/first_stage.yaml ADDED
@@ -0,0 +1,103 @@
+ data:
+   syncnet_config_path: configs/syncnet/syncnet_16_pixel.yaml
+   train_output_dir: debug/unet
+   train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/all_data_v6.txt
+   train_data_dir: ""
+   audio_embeds_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/whisper_new
+   audio_mel_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+
+   val_video_path: assets/demo1_video.mp4
+   val_audio_path: assets/demo1_audio.wav
+   batch_size: 8 # 8
+   num_workers: 11 # 11
+   num_frames: 16
+   resolution: 256
+   mask: fix_mask
+   audio_sample_rate: 16000
+   video_fps: 25
+
+ ckpt:
+   resume_ckpt_path: checkpoints/latentsync_unet.pt
+   save_ckpt_steps: 5000
+
+ run:
+   pixel_space_supervise: false
+   use_syncnet: false
+   sync_loss_weight: 0.05 # 1/283
+   perceptual_loss_weight: 0.1 # 0.1
+   recon_loss_weight: 1 # 1
+   guidance_scale: 1.0 # 1.5 or 1.0
+   trepa_loss_weight: 10
+   inference_steps: 20
+   seed: 1247
+   use_mixed_noise: true
+   mixed_noise_alpha: 1 # 1
+   mixed_precision_training: true
+   enable_gradient_checkpointing: false
+   enable_xformers_memory_efficient_attention: true
+   max_train_steps: 10000000
+   max_train_epochs: -1
+
+ optimizer:
+   lr: 1e-5
+   scale_lr: false
+   max_grad_norm: 1.0
+   lr_scheduler: constant
+   lr_warmup_steps: 0
+
+ model:
+   act_fn: silu
+   add_audio_layer: true
+   custom_audio_layer: false
+   audio_condition_method: cross_attn # Choose between [cross_attn, group_norm]
+   attention_head_dim: 8
+   block_out_channels: [320, 640, 1280, 1280]
+   center_input_sample: false
+   cross_attention_dim: 384
+   down_block_types:
+     [
+       "CrossAttnDownBlock3D",
+       "CrossAttnDownBlock3D",
+       "CrossAttnDownBlock3D",
+       "DownBlock3D",
+     ]
+   mid_block_type: UNetMidBlock3DCrossAttn
+   up_block_types:
+     [
+       "UpBlock3D",
+       "CrossAttnUpBlock3D",
+       "CrossAttnUpBlock3D",
+       "CrossAttnUpBlock3D",
+     ]
+   downsample_padding: 1
+   flip_sin_to_cos: true
+   freq_shift: 0
+   in_channels: 13 # 49
+   layers_per_block: 2
+   mid_block_scale_factor: 1
+   norm_eps: 1e-5
+   norm_num_groups: 32
+   out_channels: 4 # 16
+   sample_size: 64
+   resnet_time_scale_shift: default # Choose between [default, scale_shift]
+   unet_use_cross_frame_attention: false
+   unet_use_temporal_attention: false
+
+   # We do not actually use the motion module in the final version of LatentSync.
+   # When we started the project, we used the AnimateDiff codebase and tried the motion module, but the results were poor.
+   # We decided to leave the code here for possible future use.
+   use_motion_module: false
+   motion_module_resolutions: [1, 2, 4, 8]
+   motion_module_mid_block: false
+   motion_module_decoder_only: false
+   motion_module_type: Vanilla
+   motion_module_kwargs:
+     num_attention_heads: 8
+     num_transformer_block: 1
+     attention_block_types:
+       - Temporal_Self
+       - Temporal_Self
+     temporal_position_encoding: true
+     temporal_position_encoding_max_len: 16
+     temporal_attention_dim_div: 1
+     zero_initialize: true
configs/unet/second_stage.yaml ADDED
@@ -0,0 +1,103 @@
+ data:
+   syncnet_config_path: configs/syncnet/syncnet_16_pixel.yaml
+   train_output_dir: debug/unet
+   train_fileslist: /mnt/bn/maliva-gen-ai-v2/chunyu.li/fileslist/all_data_v6.txt
+   train_data_dir: ""
+   audio_embeds_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/whisper_new
+   audio_mel_cache_dir: /mnt/bn/maliva-gen-ai-v2/chunyu.li/audio_cache/mel_new
+
+   val_video_path: assets/demo1_video.mp4
+   val_audio_path: assets/demo1_audio.wav
+   batch_size: 2 # 8
+   num_workers: 11 # 11
+   num_frames: 16
+   resolution: 256
+   mask: fix_mask
+   audio_sample_rate: 16000
+   video_fps: 25
+
+ ckpt:
+   resume_ckpt_path: checkpoints/latentsync_unet.pt
+   save_ckpt_steps: 5000
+
+ run:
+   pixel_space_supervise: true
+   use_syncnet: true
+   sync_loss_weight: 0.05 # 1/283
+   perceptual_loss_weight: 0.1 # 0.1
+   recon_loss_weight: 1 # 1
+   guidance_scale: 1.0 # 1.5 or 1.0
+   trepa_loss_weight: 10
+   inference_steps: 20
+   seed: 1247
+   use_mixed_noise: true
+   mixed_noise_alpha: 1 # 1
+   mixed_precision_training: true
+   enable_gradient_checkpointing: false
+   enable_xformers_memory_efficient_attention: true
+   max_train_steps: 10000000
+   max_train_epochs: -1
+
+ optimizer:
+   lr: 1e-5
+   scale_lr: false
+   max_grad_norm: 1.0
+   lr_scheduler: constant
+   lr_warmup_steps: 0
+
+ model:
+   act_fn: silu
+   add_audio_layer: true
+   custom_audio_layer: false
+   audio_condition_method: cross_attn # Choose between [cross_attn, group_norm]
+   attention_head_dim: 8
+   block_out_channels: [320, 640, 1280, 1280]
+   center_input_sample: false
+   cross_attention_dim: 384
+   down_block_types:
+     [
+       "CrossAttnDownBlock3D",
+       "CrossAttnDownBlock3D",
+       "CrossAttnDownBlock3D",
+       "DownBlock3D",
+     ]
+   mid_block_type: UNetMidBlock3DCrossAttn
+   up_block_types:
+     [
+       "UpBlock3D",
+       "CrossAttnUpBlock3D",
+       "CrossAttnUpBlock3D",
+       "CrossAttnUpBlock3D",
+     ]
+   downsample_padding: 1
+   flip_sin_to_cos: true
+   freq_shift: 0
+   in_channels: 13 # 49
+   layers_per_block: 2
+   mid_block_scale_factor: 1
+   norm_eps: 1e-5
+   norm_num_groups: 32
+   out_channels: 4 # 16
+   sample_size: 64
+   resnet_time_scale_shift: default # Choose between [default, scale_shift]
+   unet_use_cross_frame_attention: false
+   unet_use_temporal_attention: false
+
+   # We do not actually use the motion module in the final version of LatentSync.
+   # When we started the project, we used the AnimateDiff codebase and tried the motion module, but the results were poor.
+   # We decided to leave the code here for possible future use.
+   use_motion_module: false
+   motion_module_resolutions: [1, 2, 4, 8]
+   motion_module_mid_block: false
+   motion_module_decoder_only: false
+   motion_module_type: Vanilla
+   motion_module_kwargs:
+     num_attention_heads: 8
+     num_transformer_block: 1
+     attention_block_types:
+       - Temporal_Self
+       - Temporal_Self
+     temporal_position_encoding: true
+     temporal_position_encoding_max_len: 16
+     temporal_attention_dim_div: 1
+     zero_initialize: true
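The first- and second-stage U-Net configs above differ mainly in their supervision settings (`pixel_space_supervise` and `use_syncnet` are off in the first stage and on in the second) and in batch size. A small sketch of inspecting this with OmegaConf, which is how app.py loads these files:

```python
from omegaconf import OmegaConf

first = OmegaConf.load("configs/unet/first_stage.yaml")
second = OmegaConf.load("configs/unet/second_stage.yaml")

for key in ("pixel_space_supervise", "use_syncnet"):
    print(key, first.run[key], "->", second.run[key])                     # False -> True
print("batch_size", first.data.batch_size, "->", second.data.batch_size)  # 8 -> 2
```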
data/syncnet_dataset.py ADDED
@@ -0,0 +1,153 @@
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import numpy as np
+ from torch.utils.data import Dataset
+ import torch
+ import random
+ from ..utils.util import gather_video_paths_recursively
+ from ..utils.image_processor import ImageProcessor
+ from ..utils.audio import melspectrogram
+ import math
+
+ from decord import AudioReader, VideoReader, cpu
+
+
+ class SyncNetDataset(Dataset):
+     def __init__(self, data_dir: str, fileslist: str, config):
+         if fileslist != "":
+             with open(fileslist) as file:
+                 self.video_paths = [line.rstrip() for line in file]
+         elif data_dir != "":
+             self.video_paths = gather_video_paths_recursively(data_dir)
+         else:
+             raise ValueError("data_dir and fileslist cannot both be empty")
+
+         self.resolution = config.data.resolution
+         self.num_frames = config.data.num_frames
+
+         self.mel_window_length = math.ceil(self.num_frames / 5 * 16)
+
+         self.audio_sample_rate = config.data.audio_sample_rate
+         self.video_fps = config.data.video_fps
+         self.audio_samples_length = int(
+             config.data.audio_sample_rate // config.data.video_fps * config.data.num_frames
+         )
+         self.image_processor = ImageProcessor(resolution=config.data.resolution, mask="half")
+         self.audio_mel_cache_dir = config.data.audio_mel_cache_dir
+         os.makedirs(self.audio_mel_cache_dir, exist_ok=True)
+
+     def __len__(self):
+         return len(self.video_paths)
+
+     def read_audio(self, video_path: str):
+         ar = AudioReader(video_path, ctx=cpu(self.worker_id), sample_rate=self.audio_sample_rate)
+         original_mel = melspectrogram(ar[:].asnumpy().squeeze(0))
+         return torch.from_numpy(original_mel)
+
+     def crop_audio_window(self, original_mel, start_index):
+         start_idx = int(80.0 * (start_index / float(self.video_fps)))
+         end_idx = start_idx + self.mel_window_length
+         return original_mel[:, start_idx:end_idx].unsqueeze(0)
+
+     def get_frames(self, video_reader: VideoReader):
+         total_num_frames = len(video_reader)
+
+         start_idx = random.randint(0, total_num_frames - self.num_frames)
+         frames_index = np.arange(start_idx, start_idx + self.num_frames, dtype=int)
+
+         while True:
+             wrong_start_idx = random.randint(0, total_num_frames - self.num_frames)
+             # wrong_start_idx = random.randint(
+             #     max(0, start_idx - 25), min(total_num_frames - self.num_frames, start_idx + 25)
+             # )
+             if wrong_start_idx == start_idx:
+                 continue
+             # if wrong_start_idx >= start_idx - self.num_frames and wrong_start_idx <= start_idx + self.num_frames:
+             #     continue
+             wrong_frames_index = np.arange(wrong_start_idx, wrong_start_idx + self.num_frames, dtype=int)
+             break
+
+         frames = video_reader.get_batch(frames_index).asnumpy()
+         wrong_frames = video_reader.get_batch(wrong_frames_index).asnumpy()
+
+         return frames, wrong_frames, start_idx
+
+     def worker_init_fn(self, worker_id):
+         # Initialize the face mesh object in each worker process,
+         # because the face mesh object cannot be shared across subprocesses
+         self.worker_id = worker_id
+         # setattr(self, f"image_processor_{worker_id}", ImageProcessor(self.resolution, self.mask))
+
+     def __getitem__(self, idx):
+         # image_processor = getattr(self, f"image_processor_{self.worker_id}")
+         while True:
+             try:
+                 idx = random.randint(0, len(self) - 1)
+
+                 # Get video file path
+                 video_path = self.video_paths[idx]
+
+                 vr = VideoReader(video_path, ctx=cpu(self.worker_id))
+
+                 if len(vr) < 2 * self.num_frames:
+                     continue
+
+                 frames, wrong_frames, start_idx = self.get_frames(vr)
+
+                 mel_cache_path = os.path.join(
+                     self.audio_mel_cache_dir, os.path.basename(video_path).replace(".mp4", "_mel.pt")
+                 )
+
+                 if os.path.isfile(mel_cache_path):
+                     try:
+                         original_mel = torch.load(mel_cache_path)
+                     except Exception as e:
+                         print(f"{type(e).__name__} - {e} - {mel_cache_path}")
+                         os.remove(mel_cache_path)
+                         original_mel = self.read_audio(video_path)
+                         torch.save(original_mel, mel_cache_path)
+                 else:
+                     original_mel = self.read_audio(video_path)
+                     torch.save(original_mel, mel_cache_path)
+
+                 mel = self.crop_audio_window(original_mel, start_idx)
+
+                 if mel.shape[-1] != self.mel_window_length:
+                     continue
+
+                 if random.choice([True, False]):
+                     y = torch.ones(1).float()
+                     chosen_frames = frames
+                 else:
+                     y = torch.zeros(1).float()
+                     chosen_frames = wrong_frames
+
+                 chosen_frames = self.image_processor.process_images(chosen_frames)
+                 # chosen_frames, _, _ = image_processor.prepare_masks_and_masked_images(
+                 #     chosen_frames, affine_transform=True
+                 # )
+
+                 vr.seek(0)  # avoid memory leak
+                 break
+
+             except Exception as e:  # Handle exceptions such as a face not being detected
+                 print(f"{type(e).__name__} - {e} - {video_path}")
+                 if "vr" in locals():
+                     vr.seek(0)  # avoid memory leak
+
+         sample = dict(frames=chosen_frames, audio_samples=mel, y=y)
+
+         return sample
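A minimal usage sketch for the dataset above. Note that `worker_init_fn` must be passed to the DataLoader, since `__getitem__` relies on `self.worker_id` being set inside each worker; the import path and config values are illustrative (in the repository the module sits inside a package, hence the relative `..utils` imports).

```python
from omegaconf import OmegaConf
from torch.utils.data import DataLoader

from latentsync.data.syncnet_dataset import SyncNetDataset  # assumed package location

config = OmegaConf.load("configs/syncnet/syncnet_16_pixel.yaml")
dataset = SyncNetDataset(config.data.train_data_dir, config.data.train_fileslist, config)

loader = DataLoader(
    dataset,
    batch_size=config.data.batch_size,
    num_workers=config.data.num_workers,    # must be > 0 so worker_init_fn actually runs
    worker_init_fn=dataset.worker_init_fn,  # sets self.worker_id in each worker
    shuffle=True,
    drop_last=True,
)
batch = next(iter(loader))
print(batch["frames"].shape, batch["audio_samples"].shape, batch["y"].shape)
```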
data/unet_dataset.py ADDED
@@ -0,0 +1,164 @@
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import numpy as np
+ from torch.utils.data import Dataset
+ import torch
+ import random
+ import cv2
+ from ..utils.image_processor import ImageProcessor, load_fixed_mask
+ from ..utils.audio import melspectrogram
+ from decord import AudioReader, VideoReader, cpu
+
+
+ class UNetDataset(Dataset):
+     def __init__(self, train_data_dir: str, config):
+         if config.data.train_fileslist != "":
+             with open(config.data.train_fileslist) as file:
+                 self.video_paths = [line.rstrip() for line in file]
+         elif train_data_dir != "":
+             self.video_paths = []
+             for file in os.listdir(train_data_dir):
+                 if file.endswith(".mp4"):
+                     self.video_paths.append(os.path.join(train_data_dir, file))
+         else:
+             raise ValueError("data_dir and fileslist cannot both be empty")
+
+         self.resolution = config.data.resolution
+         self.num_frames = config.data.num_frames
+
+         if self.num_frames == 16:
+             self.mel_window_length = 52
+         elif self.num_frames == 5:
+             self.mel_window_length = 16
+         else:
+             raise NotImplementedError("Only 16 and 5 frames are supported now")
+
+         self.audio_sample_rate = config.data.audio_sample_rate
+         self.video_fps = config.data.video_fps
+         self.mask = config.data.mask
+         self.mask_image = load_fixed_mask(self.resolution)
+         self.load_audio_data = config.model.add_audio_layer and config.run.use_syncnet
+         self.audio_mel_cache_dir = config.data.audio_mel_cache_dir
+         os.makedirs(self.audio_mel_cache_dir, exist_ok=True)
+
+     def __len__(self):
+         return len(self.video_paths)
+
+     def read_audio(self, video_path: str):
+         ar = AudioReader(video_path, ctx=cpu(self.worker_id), sample_rate=self.audio_sample_rate)
+         original_mel = melspectrogram(ar[:].asnumpy().squeeze(0))
+         return torch.from_numpy(original_mel)
+
+     def crop_audio_window(self, original_mel, start_index):
+         start_idx = int(80.0 * (start_index / float(self.video_fps)))
+         end_idx = start_idx + self.mel_window_length
+         return original_mel[:, start_idx:end_idx].unsqueeze(0)
+
+     def get_frames(self, video_reader: VideoReader):
+         total_num_frames = len(video_reader)
+
+         start_idx = random.randint(self.num_frames // 2, total_num_frames - self.num_frames - self.num_frames // 2)
+         frames_index = np.arange(start_idx, start_idx + self.num_frames, dtype=int)
+
+         while True:
+             wrong_start_idx = random.randint(0, total_num_frames - self.num_frames)
+             if wrong_start_idx > start_idx - self.num_frames and wrong_start_idx < start_idx + self.num_frames:
+                 continue
+             wrong_frames_index = np.arange(wrong_start_idx, wrong_start_idx + self.num_frames, dtype=int)
+             break
+
+         frames = video_reader.get_batch(frames_index).asnumpy()
+         wrong_frames = video_reader.get_batch(wrong_frames_index).asnumpy()
+
+         return frames, wrong_frames, start_idx
+
+     def worker_init_fn(self, worker_id):
+         # Initialize the face mesh object in each worker process,
+         # because the face mesh object cannot be shared across subprocesses
+         self.worker_id = worker_id
+         setattr(
+             self,
+             f"image_processor_{worker_id}",
+             ImageProcessor(self.resolution, self.mask, mask_image=self.mask_image),
+         )
+
+     def __getitem__(self, idx):
+         image_processor = getattr(self, f"image_processor_{self.worker_id}")
+         while True:
+             try:
+                 idx = random.randint(0, len(self) - 1)
+
+                 # Get video file path
+                 video_path = self.video_paths[idx]
+
+                 vr = VideoReader(video_path, ctx=cpu(self.worker_id))
+
+                 if len(vr) < 3 * self.num_frames:
+                     continue
+
+                 continuous_frames, ref_frames, start_idx = self.get_frames(vr)
+
+                 if self.load_audio_data:
+                     mel_cache_path = os.path.join(
+                         self.audio_mel_cache_dir, os.path.basename(video_path).replace(".mp4", "_mel.pt")
+                     )
+
+                     if os.path.isfile(mel_cache_path):
+                         try:
+                             original_mel = torch.load(mel_cache_path)
+                         except Exception as e:
+                             print(f"{type(e).__name__} - {e} - {mel_cache_path}")
+                             os.remove(mel_cache_path)
+                             original_mel = self.read_audio(video_path)
+                             torch.save(original_mel, mel_cache_path)
+                     else:
+                         original_mel = self.read_audio(video_path)
+                         torch.save(original_mel, mel_cache_path)
+
+                     mel = self.crop_audio_window(original_mel, start_idx)
+
+                     if mel.shape[-1] != self.mel_window_length:
+                         continue
+                 else:
+                     mel = []
+
+                 gt, masked_gt, mask = image_processor.prepare_masks_and_masked_images(
+                     continuous_frames, affine_transform=False
+                 )
+
+                 if self.mask == "fix_mask":
+                     ref, _, _ = image_processor.prepare_masks_and_masked_images(ref_frames, affine_transform=False)
+                 else:
+                     ref = image_processor.process_images(ref_frames)
+                 vr.seek(0)  # avoid memory leak
+                 break
+
+             except Exception as e:  # Handle exceptions such as a face not being detected
+                 print(f"{type(e).__name__} - {e} - {video_path}")
+                 if "vr" in locals():
+                     vr.seek(0)  # avoid memory leak
+
+         sample = dict(
+             gt=gt,
+             masked_gt=masked_gt,
+             ref=ref,
+             mel=mel,
+             mask=mask,
+             video_path=video_path,
+             start_idx=start_idx,
+         )
+
+         return sample
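A quick sanity check on the hard-coded mel window lengths above: `crop_audio_window` assumes 80 mel frames per second (16 kHz audio with hop size 200), so a 16-frame clip at 25 fps spans 16 / 25 × 80 = 51.2 ≈ 52 mel frames, and a 5-frame clip spans exactly 16.

```python
import math

fps, sample_rate, hop_size = 25, 16000, 200
mel_fps = sample_rate / hop_size                 # 80.0, the factor used in crop_audio_window
for num_frames in (16, 5):
    window = math.ceil(num_frames / fps * mel_fps)
    print(f"{num_frames} video frames -> {window} mel frames")
```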
data_processing_pipeline.sh ADDED
@@ -0,0 +1,9 @@
+ #!/bin/bash
+
+ python -m preprocess.data_processing_pipeline \
+     --total_num_workers 20 \
+     --per_gpu_num_workers 10 \
+     --resolution 256 \
+     --sync_conf_threshold 3 \
+     --temp_dir temp \
+     --input_dir /mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/raw
eval/detectors/README.md ADDED
@@ -0,0 +1,3 @@
+ # Face detector
+
+ This face detector is adapted from `https://github.com/cs-giung/face-detection-pytorch`.
eval/detectors/__init__.py ADDED
@@ -0,0 +1 @@
+ from .s3fd import S3FD
eval/detectors/s3fd/__init__.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import numpy as np
3
+ import cv2
4
+ import torch
5
+ from torchvision import transforms
6
+ from .nets import S3FDNet
7
+ from .box_utils import nms_
8
+
9
+ PATH_WEIGHT = 'checkpoints/auxiliary/sfd_face.pth'
10
+ img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')
11
+
12
+
13
+ class S3FD():
14
+
15
+ def __init__(self, device='cuda'):
16
+
17
+ tstamp = time.time()
18
+ self.device = device
19
+
20
+ print('[S3FD] loading with', self.device)
21
+ self.net = S3FDNet(device=self.device).to(self.device)
22
+ state_dict = torch.load(PATH_WEIGHT, map_location=self.device)
23
+ self.net.load_state_dict(state_dict)
24
+ self.net.eval()
25
+ print('[S3FD] finished loading (%.4f sec)' % (time.time() - tstamp))
26
+
27
+ def detect_faces(self, image, conf_th=0.8, scales=[1]):
28
+
29
+ w, h = image.shape[1], image.shape[0]
30
+
31
+ bboxes = np.empty(shape=(0, 5))
32
+
33
+ with torch.no_grad():
34
+ for s in scales:
35
+ scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
36
+
37
+ scaled_img = np.swapaxes(scaled_img, 1, 2)
38
+ scaled_img = np.swapaxes(scaled_img, 1, 0)
39
+ scaled_img = scaled_img[[2, 1, 0], :, :]
40
+ scaled_img = scaled_img.astype('float32')
41
+ scaled_img -= img_mean
42
+ scaled_img = scaled_img[[2, 1, 0], :, :]
43
+ x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
44
+ y = self.net(x)
45
+
46
+ detections = y.data
47
+ scale = torch.Tensor([w, h, w, h])
48
+
49
+ for i in range(detections.size(1)):
50
+ j = 0
51
+ while detections[0, i, j, 0] > conf_th:
52
+ score = detections[0, i, j, 0]
53
+ pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
54
+ bbox = (pt[0], pt[1], pt[2], pt[3], score)
55
+ bboxes = np.vstack((bboxes, bbox))
56
+ j += 1
57
+
58
+ keep = nms_(bboxes, 0.1)
59
+ bboxes = bboxes[keep]
60
+
61
+ return bboxes
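
A minimal usage sketch for the `S3FD` wrapper above, assuming the weights already sit at `checkpoints/auxiliary/sfd_face.pth`; `frame.jpg` is a placeholder path, and feeding the detector RGB-ordered pixels is an assumption carried over from how the rest of this repo's detection code prepares frames.

    import cv2
    from eval.detectors import S3FD

    detector = S3FD(device="cuda")                          # or "cpu"; loads checkpoints/auxiliary/sfd_face.pth
    frame_bgr = cv2.imread("frame.jpg")                     # "frame.jpg" is a placeholder path
    frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    bboxes = detector.detect_faces(frame_rgb, conf_th=0.8, scales=[0.5])
    for x1, y1, x2, y2, score in bboxes:                    # each row is (x1, y1, x2, y2, confidence)
        print(f"face ({x1:.0f}, {y1:.0f})-({x2:.0f}, {y2:.0f}), conf {score:.2f}")

Passing a scale below 1 only downsizes the frame before detection; the returned coordinates are already mapped back to the original resolution.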
eval/detectors/s3fd/box_utils.py ADDED
@@ -0,0 +1,221 @@
1
+ import numpy as np
2
+ from itertools import product as product
3
+ import torch
4
+ from torch.autograd import Function
5
+ import warnings
6
+
7
+
8
+ def nms_(dets, thresh):
9
+ """
10
+ Courtesy of Ross Girshick
11
+ [https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py]
12
+ """
13
+ x1 = dets[:, 0]
14
+ y1 = dets[:, 1]
15
+ x2 = dets[:, 2]
16
+ y2 = dets[:, 3]
17
+ scores = dets[:, 4]
18
+
19
+ areas = (x2 - x1) * (y2 - y1)
20
+ order = scores.argsort()[::-1]
21
+
22
+ keep = []
23
+ while order.size > 0:
24
+ i = order[0]
25
+ keep.append(int(i))
26
+ xx1 = np.maximum(x1[i], x1[order[1:]])
27
+ yy1 = np.maximum(y1[i], y1[order[1:]])
28
+ xx2 = np.minimum(x2[i], x2[order[1:]])
29
+ yy2 = np.minimum(y2[i], y2[order[1:]])
30
+
31
+ w = np.maximum(0.0, xx2 - xx1)
32
+ h = np.maximum(0.0, yy2 - yy1)
33
+ inter = w * h
34
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
35
+
36
+ inds = np.where(ovr <= thresh)[0]
37
+ order = order[inds + 1]
38
+
39
+ return np.array(keep).astype(np.int32)
40
+
41
+
42
+ def decode(loc, priors, variances):
43
+ """Decode locations from predictions using priors to undo
44
+ the encoding we did for offset regression at train time.
45
+ Args:
46
+ loc (tensor): location predictions for loc layers,
47
+ Shape: [num_priors,4]
48
+ priors (tensor): Prior boxes in center-offset form.
49
+ Shape: [num_priors,4].
50
+ variances: (list[float]) Variances of priorboxes
51
+ Return:
52
+ decoded bounding box predictions
53
+ """
54
+
55
+ boxes = torch.cat((
56
+ priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
57
+ priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
58
+ boxes[:, :2] -= boxes[:, 2:] / 2
59
+ boxes[:, 2:] += boxes[:, :2]
60
+ return boxes
61
+
62
+
63
+ def nms(boxes, scores, overlap=0.5, top_k=200):
64
+ """Apply non-maximum suppression at test time to avoid detecting too many
65
+ overlapping bounding boxes for a given object.
66
+ Args:
67
+ boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
68
+ scores: (tensor) The class prediction scores for the img, Shape: [num_priors].
69
+ overlap: (float) The overlap thresh for suppressing unnecessary boxes.
70
+ top_k: (int) The maximum number of box preds to consider.
71
+ Return:
72
+ The indices of the kept boxes with respect to num_priors.
73
+ """
74
+
75
+ keep = scores.new(scores.size(0)).zero_().long()
76
+ if boxes.numel() == 0:
77
+ return keep, 0
78
+ x1 = boxes[:, 0]
79
+ y1 = boxes[:, 1]
80
+ x2 = boxes[:, 2]
81
+ y2 = boxes[:, 3]
82
+ area = torch.mul(x2 - x1, y2 - y1)
83
+ v, idx = scores.sort(0) # sort in ascending order
84
+ # I = I[v >= 0.01]
85
+ idx = idx[-top_k:] # indices of the top-k largest vals
86
+ xx1 = boxes.new()
87
+ yy1 = boxes.new()
88
+ xx2 = boxes.new()
89
+ yy2 = boxes.new()
90
+ w = boxes.new()
91
+ h = boxes.new()
92
+
93
+ # keep = torch.Tensor()
94
+ count = 0
95
+ while idx.numel() > 0:
96
+ i = idx[-1] # index of current largest val
97
+ # keep.append(i)
98
+ keep[count] = i
99
+ count += 1
100
+ if idx.size(0) == 1:
101
+ break
102
+ idx = idx[:-1] # remove kept element from view
103
+ # load bboxes of next highest vals
104
+ with warnings.catch_warnings():
105
+ # Ignore UserWarning within this block
106
+ warnings.simplefilter("ignore", category=UserWarning)
107
+ torch.index_select(x1, 0, idx, out=xx1)
108
+ torch.index_select(y1, 0, idx, out=yy1)
109
+ torch.index_select(x2, 0, idx, out=xx2)
110
+ torch.index_select(y2, 0, idx, out=yy2)
111
+ # store element-wise max with next highest score
112
+ xx1 = torch.clamp(xx1, min=x1[i])
113
+ yy1 = torch.clamp(yy1, min=y1[i])
114
+ xx2 = torch.clamp(xx2, max=x2[i])
115
+ yy2 = torch.clamp(yy2, max=y2[i])
116
+ w.resize_as_(xx2)
117
+ h.resize_as_(yy2)
118
+ w = xx2 - xx1
119
+ h = yy2 - yy1
120
+ # check sizes of xx1 and xx2.. after each iteration
121
+ w = torch.clamp(w, min=0.0)
122
+ h = torch.clamp(h, min=0.0)
123
+ inter = w * h
124
+ # IoU = i / (area(a) + area(b) - i)
125
+ rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
126
+ union = (rem_areas - inter) + area[i]
127
+ IoU = inter / union # store result in iou
128
+ # keep only elements with an IoU <= overlap
129
+ idx = idx[IoU.le(overlap)]
130
+ return keep, count
131
+
132
+
133
+ class Detect(object):
134
+
135
+ def __init__(self, num_classes=2,
136
+ top_k=750, nms_thresh=0.3, conf_thresh=0.05,
137
+ variance=[0.1, 0.2], nms_top_k=5000):
138
+
139
+ self.num_classes = num_classes
140
+ self.top_k = top_k
141
+ self.nms_thresh = nms_thresh
142
+ self.conf_thresh = conf_thresh
143
+ self.variance = variance
144
+ self.nms_top_k = nms_top_k
145
+
146
+ def forward(self, loc_data, conf_data, prior_data):
147
+
148
+ num = loc_data.size(0)
149
+ num_priors = prior_data.size(0)
150
+
151
+ conf_preds = conf_data.view(num, num_priors, self.num_classes).transpose(2, 1)
152
+ batch_priors = prior_data.view(-1, num_priors, 4).expand(num, num_priors, 4)
153
+ batch_priors = batch_priors.contiguous().view(-1, 4)
154
+
155
+ decoded_boxes = decode(loc_data.view(-1, 4), batch_priors, self.variance)
156
+ decoded_boxes = decoded_boxes.view(num, num_priors, 4)
157
+
158
+ output = torch.zeros(num, self.num_classes, self.top_k, 5)
159
+
160
+ for i in range(num):
161
+ boxes = decoded_boxes[i].clone()
162
+ conf_scores = conf_preds[i].clone()
163
+
164
+ for cl in range(1, self.num_classes):
165
+ c_mask = conf_scores[cl].gt(self.conf_thresh)
166
+ scores = conf_scores[cl][c_mask]
167
+
168
+ if scores.dim() == 0:
169
+ continue
170
+ l_mask = c_mask.unsqueeze(1).expand_as(boxes)
171
+ boxes_ = boxes[l_mask].view(-1, 4)
172
+ ids, count = nms(boxes_, scores, self.nms_thresh, self.nms_top_k)
173
+ count = count if count < self.top_k else self.top_k
174
+
175
+ output[i, cl, :count] = torch.cat((scores[ids[:count]].unsqueeze(1), boxes_[ids[:count]]), 1)
176
+
177
+ return output
178
+
179
+
180
+ class PriorBox(object):
181
+
182
+ def __init__(self, input_size, feature_maps,
183
+ variance=[0.1, 0.2],
184
+ min_sizes=[16, 32, 64, 128, 256, 512],
185
+ steps=[4, 8, 16, 32, 64, 128],
186
+ clip=False):
187
+
188
+ super(PriorBox, self).__init__()
189
+
190
+ self.imh = input_size[0]
191
+ self.imw = input_size[1]
192
+ self.feature_maps = feature_maps
193
+
194
+ self.variance = variance
195
+ self.min_sizes = min_sizes
196
+ self.steps = steps
197
+ self.clip = clip
198
+
199
+ def forward(self):
200
+ mean = []
201
+ for k, fmap in enumerate(self.feature_maps):
202
+ feath = fmap[0]
203
+ featw = fmap[1]
204
+ for i, j in product(range(feath), range(featw)):
205
+ f_kw = self.imw / self.steps[k]
206
+ f_kh = self.imh / self.steps[k]
207
+
208
+ cx = (j + 0.5) / f_kw
209
+ cy = (i + 0.5) / f_kh
210
+
211
+ s_kw = self.min_sizes[k] / self.imw
212
+ s_kh = self.min_sizes[k] / self.imh
213
+
214
+ mean += [cx, cy, s_kw, s_kh]
215
+
216
+ output = torch.FloatTensor(mean).view(-1, 4)
217
+
218
+ if self.clip:
219
+ output.clamp_(max=1, min=0)
220
+
221
+ return output
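
A small sanity check for the greedy `nms_` above, with hand-picked boxes (the numbers are purely illustrative): the two heavily overlapping detections collapse onto the higher-scoring one, while the distant box survives.

    import numpy as np
    from eval.detectors.s3fd.box_utils import nms_

    dets = np.array([
        [10.0, 10.0, 50.0, 50.0, 0.9],      # kept (highest score)
        [12.0, 12.0, 52.0, 52.0, 0.8],      # IoU ~0.82 with the first box -> suppressed at thresh 0.1
        [100.0, 100.0, 140.0, 140.0, 0.7],  # no overlap -> kept
    ])
    keep = nms_(dets, thresh=0.1)
    print(keep)  # expected: [0 2]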
eval/detectors/s3fd/nets.py ADDED
@@ -0,0 +1,174 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import torch.nn.init as init
5
+ from .box_utils import Detect, PriorBox
6
+
7
+
8
+ class L2Norm(nn.Module):
9
+
10
+ def __init__(self, n_channels, scale):
11
+ super(L2Norm, self).__init__()
12
+ self.n_channels = n_channels
13
+ self.gamma = scale or None
14
+ self.eps = 1e-10
15
+ self.weight = nn.Parameter(torch.Tensor(self.n_channels))
16
+ self.reset_parameters()
17
+
18
+ def reset_parameters(self):
19
+ init.constant_(self.weight, self.gamma)
20
+
21
+ def forward(self, x):
22
+ norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
23
+ x = torch.div(x, norm)
24
+ out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
25
+ return out
26
+
27
+
28
+ class S3FDNet(nn.Module):
29
+
30
+ def __init__(self, device='cuda'):
31
+ super(S3FDNet, self).__init__()
32
+ self.device = device
33
+
34
+ self.vgg = nn.ModuleList([
35
+ nn.Conv2d(3, 64, 3, 1, padding=1),
36
+ nn.ReLU(inplace=True),
37
+ nn.Conv2d(64, 64, 3, 1, padding=1),
38
+ nn.ReLU(inplace=True),
39
+ nn.MaxPool2d(2, 2),
40
+
41
+ nn.Conv2d(64, 128, 3, 1, padding=1),
42
+ nn.ReLU(inplace=True),
43
+ nn.Conv2d(128, 128, 3, 1, padding=1),
44
+ nn.ReLU(inplace=True),
45
+ nn.MaxPool2d(2, 2),
46
+
47
+ nn.Conv2d(128, 256, 3, 1, padding=1),
48
+ nn.ReLU(inplace=True),
49
+ nn.Conv2d(256, 256, 3, 1, padding=1),
50
+ nn.ReLU(inplace=True),
51
+ nn.Conv2d(256, 256, 3, 1, padding=1),
52
+ nn.ReLU(inplace=True),
53
+ nn.MaxPool2d(2, 2, ceil_mode=True),
54
+
55
+ nn.Conv2d(256, 512, 3, 1, padding=1),
56
+ nn.ReLU(inplace=True),
57
+ nn.Conv2d(512, 512, 3, 1, padding=1),
58
+ nn.ReLU(inplace=True),
59
+ nn.Conv2d(512, 512, 3, 1, padding=1),
60
+ nn.ReLU(inplace=True),
61
+ nn.MaxPool2d(2, 2),
62
+
63
+ nn.Conv2d(512, 512, 3, 1, padding=1),
64
+ nn.ReLU(inplace=True),
65
+ nn.Conv2d(512, 512, 3, 1, padding=1),
66
+ nn.ReLU(inplace=True),
67
+ nn.Conv2d(512, 512, 3, 1, padding=1),
68
+ nn.ReLU(inplace=True),
69
+ nn.MaxPool2d(2, 2),
70
+
71
+ nn.Conv2d(512, 1024, 3, 1, padding=6, dilation=6),
72
+ nn.ReLU(inplace=True),
73
+ nn.Conv2d(1024, 1024, 1, 1),
74
+ nn.ReLU(inplace=True),
75
+ ])
76
+
77
+ self.L2Norm3_3 = L2Norm(256, 10)
78
+ self.L2Norm4_3 = L2Norm(512, 8)
79
+ self.L2Norm5_3 = L2Norm(512, 5)
80
+
81
+ self.extras = nn.ModuleList([
82
+ nn.Conv2d(1024, 256, 1, 1),
83
+ nn.Conv2d(256, 512, 3, 2, padding=1),
84
+ nn.Conv2d(512, 128, 1, 1),
85
+ nn.Conv2d(128, 256, 3, 2, padding=1),
86
+ ])
87
+
88
+ self.loc = nn.ModuleList([
89
+ nn.Conv2d(256, 4, 3, 1, padding=1),
90
+ nn.Conv2d(512, 4, 3, 1, padding=1),
91
+ nn.Conv2d(512, 4, 3, 1, padding=1),
92
+ nn.Conv2d(1024, 4, 3, 1, padding=1),
93
+ nn.Conv2d(512, 4, 3, 1, padding=1),
94
+ nn.Conv2d(256, 4, 3, 1, padding=1),
95
+ ])
96
+
97
+ self.conf = nn.ModuleList([
98
+ nn.Conv2d(256, 4, 3, 1, padding=1),
99
+ nn.Conv2d(512, 2, 3, 1, padding=1),
100
+ nn.Conv2d(512, 2, 3, 1, padding=1),
101
+ nn.Conv2d(1024, 2, 3, 1, padding=1),
102
+ nn.Conv2d(512, 2, 3, 1, padding=1),
103
+ nn.Conv2d(256, 2, 3, 1, padding=1),
104
+ ])
105
+
106
+ self.softmax = nn.Softmax(dim=-1)
107
+ self.detect = Detect()
108
+
109
+ def forward(self, x):
110
+ size = x.size()[2:]
111
+ sources = list()
112
+ loc = list()
113
+ conf = list()
114
+
115
+ for k in range(16):
116
+ x = self.vgg[k](x)
117
+ s = self.L2Norm3_3(x)
118
+ sources.append(s)
119
+
120
+ for k in range(16, 23):
121
+ x = self.vgg[k](x)
122
+ s = self.L2Norm4_3(x)
123
+ sources.append(s)
124
+
125
+ for k in range(23, 30):
126
+ x = self.vgg[k](x)
127
+ s = self.L2Norm5_3(x)
128
+ sources.append(s)
129
+
130
+ for k in range(30, len(self.vgg)):
131
+ x = self.vgg[k](x)
132
+ sources.append(x)
133
+
134
+ # apply extra layers and cache source layer outputs
135
+ for k, v in enumerate(self.extras):
136
+ x = F.relu(v(x), inplace=True)
137
+ if k % 2 == 1:
138
+ sources.append(x)
139
+
140
+ # apply multibox head to source layers
141
+ loc_x = self.loc[0](sources[0])
142
+ conf_x = self.conf[0](sources[0])
143
+
144
+ max_conf, _ = torch.max(conf_x[:, 0:3, :, :], dim=1, keepdim=True)
145
+ conf_x = torch.cat((max_conf, conf_x[:, 3:, :, :]), dim=1)
146
+
147
+ loc.append(loc_x.permute(0, 2, 3, 1).contiguous())
148
+ conf.append(conf_x.permute(0, 2, 3, 1).contiguous())
149
+
150
+ for i in range(1, len(sources)):
151
+ x = sources[i]
152
+ conf.append(self.conf[i](x).permute(0, 2, 3, 1).contiguous())
153
+ loc.append(self.loc[i](x).permute(0, 2, 3, 1).contiguous())
154
+
155
+ features_maps = []
156
+ for i in range(len(loc)):
157
+ feat = []
158
+ feat += [loc[i].size(1), loc[i].size(2)]
159
+ features_maps += [feat]
160
+
161
+ loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
162
+ conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
163
+
164
+ with torch.no_grad():
165
+ self.priorbox = PriorBox(size, features_maps)
166
+ self.priors = self.priorbox.forward()
167
+
168
+ output = self.detect.forward(
169
+ loc.view(loc.size(0), -1, 4),
170
+ self.softmax(conf.view(conf.size(0), -1, 2)),
171
+ self.priors.type(type(x.data)).to(self.device)
172
+ )
173
+
174
+ return output
eval/draw_syncnet_lines.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import torch
16
+ import matplotlib.pyplot as plt
17
+
18
+
19
+ class Chart:
20
+ def __init__(self):
21
+ self.loss_list = []
22
+
23
+ def add_ckpt(self, ckpt_path, line_name):
24
+ ckpt = torch.load(ckpt_path, map_location="cpu")
25
+ train_step_list = ckpt["train_step_list"]
26
+ train_loss_list = ckpt["train_loss_list"]
27
+ val_step_list = ckpt["val_step_list"]
28
+ val_loss_list = ckpt["val_loss_list"]
29
+ val_step_list = [val_step_list[0]] + val_step_list[4::5]
30
+ val_loss_list = [val_loss_list[0]] + val_loss_list[4::5]
31
+ self.loss_list.append((line_name, train_step_list, train_loss_list, val_step_list, val_loss_list))
32
+
33
+ def draw(self, save_path, plot_val=True):
34
+ # Global settings
35
+ plt.rcParams["font.size"] = 14
36
+ plt.rcParams["font.family"] = "serif"
37
+ plt.rcParams["font.sans-serif"] = ["Arial", "DejaVu Sans", "Lucida Grande"]
38
+ plt.rcParams["font.serif"] = ["Times New Roman", "DejaVu Serif"]
39
+
40
+ # Creating the plot
41
+ plt.figure(figsize=(7.766, 4.8)) # Golden ratio
42
+ for loss in self.loss_list:
43
+ if plot_val:
44
+ (line,) = plt.plot(loss[1], loss[2], label=loss[0], linewidth=0.5, alpha=0.5)
45
+ line_color = line.get_color()
46
+ plt.plot(loss[3], loss[4], linewidth=1.5, color=line_color)
47
+ else:
48
+ plt.plot(loss[1], loss[2], label=loss[0], linewidth=1)
49
+ plt.xlabel("Step")
50
+ plt.ylabel("Loss")
51
+ legend = plt.legend()
52
+ # legend = plt.legend(loc='upper right', bbox_to_anchor=(1, 0.82))
53
+
54
+ # Adjust the linewidth of legend
55
+ for line in legend.get_lines():
56
+ line.set_linewidth(2)
57
+
58
+ plt.savefig(save_path, transparent=True)
59
+ plt.close()
60
+
61
+
62
+ if __name__ == "__main__":
63
+ chart = Chart()
64
+ # chart.add_ckpt("output/syncnet/train-2024_10_25-18:14:43/checkpoints/checkpoint-10000.pt", "w/ self-attn")
65
+ # chart.add_ckpt("output/syncnet/train-2024_10_25-18:21:59/checkpoints/checkpoint-10000.pt", "w/o self-attn")
66
+ chart.add_ckpt("output/syncnet/train-2024_10_24-21:03:11/checkpoints/checkpoint-10000.pt", "Dim 512")
67
+ chart.add_ckpt("output/syncnet/train-2024_10_25-18:21:59/checkpoints/checkpoint-10000.pt", "Dim 2048")
68
+ chart.add_ckpt("output/syncnet/train-2024_10_24-22:37:04/checkpoints/checkpoint-10000.pt", "Dim 4096")
69
+ chart.add_ckpt("output/syncnet/train-2024_10_25-02:30:17/checkpoints/checkpoint-10000.pt", "Dim 6144")
70
+ chart.draw("ablation.pdf", plot_val=True)
eval/eval_fvd.py ADDED
@@ -0,0 +1,96 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import mediapipe as mp
16
+ import cv2
17
+ from decord import VideoReader
18
+ from einops import rearrange
19
+ import os
20
+ import numpy as np
21
+ import torch
22
+ import tqdm
23
+ from eval.fvd import compute_our_fvd
24
+
25
+
26
+ class FVD:
27
+ def __init__(self, resolution=(224, 224)):
28
+ self.face_detector = mp.solutions.face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5)
29
+ self.resolution = resolution
30
+
31
+ def detect_face(self, image):
32
+ height, width = image.shape[:2]
33
+ # Process the image and detect faces.
34
+ results = self.face_detector.process(image)
35
+
36
+ if not results.detections: # Face not detected
37
+ raise Exception("Face not detected")
38
+
39
+ detection = results.detections[0] # Only use the first face in the image
40
+ bounding_box = detection.location_data.relative_bounding_box
41
+ xmin = int(bounding_box.xmin * width)
42
+ ymin = int(bounding_box.ymin * height)
43
+ face_width = int(bounding_box.width * width)
44
+ face_height = int(bounding_box.height * height)
45
+
46
+ # Crop the image to the bounding box.
47
+ xmin = max(0, xmin)
48
+ ymin = max(0, ymin)
49
+ xmax = min(width, xmin + face_width)
50
+ ymax = min(height, ymin + face_height)
51
+ image = image[ymin:ymax, xmin:xmax]
52
+
53
+ return image
54
+
55
+ def detect_video(self, video_path, real: bool = True):
56
+ vr = VideoReader(video_path)
57
+ video_frames = vr[20:36].asnumpy() # Use one frame per second
58
+ vr.seek(0) # avoid memory leak
59
+ faces = []
60
+ for frame in video_frames:
61
+ face = self.detect_face(frame)
62
+ face = cv2.resize(face, (self.resolution[1], self.resolution[0]), interpolation=cv2.INTER_AREA)
63
+ faces.append(face)
64
+
65
+ if len(faces) != 16:
66
+ return None
67
+ faces = np.stack(faces, axis=0) # (f, h, w, c)
68
+ faces = torch.from_numpy(faces)
69
+ return faces
70
+
71
+
72
+ def eval_fvd(real_videos_dir, fake_videos_dir):
73
+ fvd = FVD()
74
+ real_features_list = []
75
+ fake_features_list = []
76
+ for file in tqdm.tqdm(os.listdir(fake_videos_dir)):
77
+ if file.endswith(".mp4"):
78
+ real_video_path = os.path.join(real_videos_dir, file.replace("_out.mp4", ".mp4"))
79
+ fake_video_path = os.path.join(fake_videos_dir, file)
80
+ real_features = fvd.detect_video(real_video_path, real=True)
81
+ fake_features = fvd.detect_video(fake_video_path, real=False)
82
+ if real_features is None or fake_features is None:
83
+ continue
84
+ real_features_list.append(real_features)
85
+ fake_features_list.append(fake_features)
86
+
87
+ real_features = torch.stack(real_features_list) / 255.0
88
+ fake_features = torch.stack(fake_features_list) / 255.0
89
+ print(compute_our_fvd(real_features, fake_features, device="cpu"))
90
+
91
+
92
+ if __name__ == "__main__":
93
+ real_videos_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/segmented/cross"
94
+ fake_videos_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/segmented/latentsync_cross"
95
+
96
+ eval_fvd(real_videos_dir, fake_videos_dir)
eval/eval_sync_conf.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import os
17
+ import tqdm
18
+ from statistics import fmean
19
+ from eval.syncnet import SyncNetEval
20
+ from eval.syncnet_detect import SyncNetDetector
21
+ from latentsync.utils.util import red_text
22
+ import torch
23
+
24
+
25
+ def syncnet_eval(syncnet, syncnet_detector, video_path, temp_dir, detect_results_dir="detect_results"):
26
+ syncnet_detector(video_path=video_path, min_track=50)
27
+ crop_videos = os.listdir(os.path.join(detect_results_dir, "crop"))
28
+ if crop_videos == []:
29
+ raise Exception(red_text(f"Face not detected in {video_path}"))
30
+ av_offset_list = []
31
+ conf_list = []
32
+ for video in crop_videos:
33
+ av_offset, _, conf = syncnet.evaluate(
34
+ video_path=os.path.join(detect_results_dir, "crop", video), temp_dir=temp_dir
35
+ )
36
+ av_offset_list.append(av_offset)
37
+ conf_list.append(conf)
38
+ av_offset = int(fmean(av_offset_list))
39
+ conf = fmean(conf_list)
40
+ print(f"Input video: {video_path}\nSyncNet confidence: {conf:.2f}\nAV offset: {av_offset}")
41
+ return av_offset, conf
42
+
43
+
44
+ def main():
45
+ parser = argparse.ArgumentParser(description="SyncNet")
46
+ parser.add_argument("--initial_model", type=str, default="checkpoints/auxiliary/syncnet_v2.model", help="")
47
+ parser.add_argument("--video_path", type=str, default=None, help="")
48
+ parser.add_argument("--videos_dir", type=str, default="/root/processed")
49
+ parser.add_argument("--temp_dir", type=str, default="temp", help="")
50
+
51
+ args = parser.parse_args()
52
+
53
+ device = "cuda" if torch.cuda.is_available() else "cpu"
54
+
55
+ syncnet = SyncNetEval(device=device)
56
+ syncnet.loadParameters(args.initial_model)
57
+
58
+ syncnet_detector = SyncNetDetector(device=device, detect_results_dir="detect_results")
59
+
60
+ if args.video_path is not None:
61
+ syncnet_eval(syncnet, syncnet_detector, args.video_path, args.temp_dir)
62
+ else:
63
+ sync_conf_list = []
64
+ video_names = sorted([f for f in os.listdir(args.videos_dir) if f.endswith(".mp4")])
65
+ for video_name in tqdm.tqdm(video_names):
66
+ try:
67
+ _, conf = syncnet_eval(
68
+ syncnet, syncnet_detector, os.path.join(args.videos_dir, video_name), args.temp_dir
69
+ )
70
+ sync_conf_list.append(conf)
71
+ except Exception as e:
72
+ print(e)
73
+ print(f"The average sync confidence is {fmean(sync_conf_list):.02f}")
74
+
75
+
76
+ if __name__ == "__main__":
77
+ main()
eval/eval_sync_conf.sh ADDED
@@ -0,0 +1,2 @@
1
+ #!/bin/bash
2
+ python -m eval.eval_sync_conf --video_path "RD_Radio1_000_006_out.mp4"
eval/eval_syncnet_acc.py ADDED
@@ -0,0 +1,118 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ from tqdm.auto import tqdm
17
+ import torch
18
+ import torch.nn as nn
19
+ from einops import rearrange
20
+ from latentsync.models.syncnet import SyncNet
21
+ from latentsync.data.syncnet_dataset import SyncNetDataset
22
+ from diffusers import AutoencoderKL
23
+ from omegaconf import OmegaConf
24
+ from accelerate.utils import set_seed
25
+
26
+
27
+ def main(config):
28
+ set_seed(config.run.seed)
29
+
30
+ device = "cuda" if torch.cuda.is_available() else "cpu"
31
+
32
+ if config.data.latent_space:
33
+ vae = AutoencoderKL.from_pretrained(
34
+ "runwayml/stable-diffusion-inpainting", subfolder="vae", revision="fp16", torch_dtype=torch.float16
35
+ )
36
+ vae.requires_grad_(False)
37
+ vae.to(device)
38
+
39
+ # Dataset and Dataloader setup
40
+ dataset = SyncNetDataset(config.data.val_data_dir, config.data.val_fileslist, config)
41
+
42
+ test_dataloader = torch.utils.data.DataLoader(
43
+ dataset,
44
+ batch_size=config.data.batch_size,
45
+ shuffle=False,
46
+ num_workers=config.data.num_workers,
47
+ drop_last=False,
48
+ worker_init_fn=dataset.worker_init_fn,
49
+ )
50
+
51
+ # Model
52
+ syncnet = SyncNet(OmegaConf.to_container(config.model)).to(device)
53
+
54
+ print(f"Load checkpoint from: {config.ckpt.inference_ckpt_path}")
55
+ checkpoint = torch.load(config.ckpt.inference_ckpt_path, map_location=device)
56
+
57
+ syncnet.load_state_dict(checkpoint["state_dict"])
58
+ syncnet.to(dtype=torch.float16)
59
+ syncnet.requires_grad_(False)
60
+ syncnet.eval()
61
+
62
+ global_step = 0
63
+ num_val_batches = config.data.num_val_samples // config.data.batch_size
64
+ progress_bar = tqdm(range(0, num_val_batches), initial=0, desc="Testing accuracy")
65
+
66
+ num_correct_preds = 0
67
+ num_total_preds = 0
68
+
69
+ while True:
70
+ for step, batch in enumerate(test_dataloader):
71
+ ### >>>> Test >>>> ###
72
+
73
+ frames = batch["frames"].to(device, dtype=torch.float16)
74
+ audio_samples = batch["audio_samples"].to(device, dtype=torch.float16)
75
+ y = batch["y"].to(device, dtype=torch.float16).squeeze(1)
76
+
77
+ if config.data.latent_space:
78
+ frames = rearrange(frames, "b f c h w -> (b f) c h w")
79
+
80
+ with torch.no_grad():
81
+ frames = vae.encode(frames).latent_dist.sample() * 0.18215
82
+
83
+ frames = rearrange(frames, "(b f) c h w -> b (f c) h w", f=config.data.num_frames)
84
+ else:
85
+ frames = rearrange(frames, "b f c h w -> b (f c) h w")
86
+
87
+ if config.data.lower_half:
88
+ height = frames.shape[2]
89
+ frames = frames[:, :, height // 2 :, :]
90
+
91
+ with torch.no_grad():
92
+ vision_embeds, audio_embeds = syncnet(frames, audio_samples)
93
+
94
+ sims = nn.functional.cosine_similarity(vision_embeds, audio_embeds)
95
+
96
+ preds = (sims > 0.5).to(dtype=torch.float16)
97
+ num_correct_preds += (preds == y).sum().item()
98
+ num_total_preds += len(sims)
99
+
100
+ progress_bar.update(1)
101
+ global_step += 1
102
+
103
+ if global_step >= num_val_batches:
104
+ progress_bar.close()
105
+ print(f"Accuracy score: {num_correct_preds / num_total_preds*100:.2f}%")
106
+ return
107
+
108
+
109
+ if __name__ == "__main__":
110
+ parser = argparse.ArgumentParser(description="Code to test the accuracy of expert lip-sync discriminator")
111
+
112
+ parser.add_argument("--config_path", type=str, default="configs/syncnet/syncnet_16_latent.yaml")
113
+ args = parser.parse_args()
114
+
115
+ # Load a configuration file
116
+ config = OmegaConf.load(args.config_path)
117
+
118
+ main(config)
eval/eval_syncnet_acc.sh ADDED
@@ -0,0 +1,3 @@
1
+ #!/bin/bash
2
+
3
+ python -m eval.eval_syncnet_acc --config_path "configs/syncnet/syncnet_16_pixel.yaml"
eval/fvd.py ADDED
@@ -0,0 +1,56 @@
1
+ # Adapted from https://github.com/universome/fvd-comparison/blob/master/our_fvd.py
2
+
3
+ from typing import Tuple
4
+ import scipy
5
+ import numpy as np
6
+ import torch
7
+
8
+
9
+ def compute_fvd(feats_fake: np.ndarray, feats_real: np.ndarray) -> float:
10
+ mu_gen, sigma_gen = compute_stats(feats_fake)
11
+ mu_real, sigma_real = compute_stats(feats_real)
12
+
13
+ m = np.square(mu_gen - mu_real).sum()
14
+ s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
15
+ fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2))
16
+
17
+ return float(fid)
18
+
19
+
20
+ def compute_stats(feats: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
21
+ mu = feats.mean(axis=0) # [d]
22
+ sigma = np.cov(feats, rowvar=False) # [d, d]
23
+
24
+ return mu, sigma
25
+
26
+
27
+ @torch.no_grad()
28
+ def compute_our_fvd(videos_fake: np.ndarray, videos_real: np.ndarray, device: str = "cuda") -> float:
29
+ i3d_path = "checkpoints/auxiliary/i3d_torchscript.pt"
30
+ i3d_kwargs = dict(
31
+ rescale=False, resize=False, return_features=True
32
+ ) # Return raw features before the softmax layer.
33
+
34
+ with open(i3d_path, "rb") as f:
35
+ i3d_model = torch.jit.load(f).eval().to(device)
36
+
37
+ videos_fake = videos_fake.permute(0, 4, 1, 2, 3).to(device)
38
+ videos_real = videos_real.permute(0, 4, 1, 2, 3).to(device)
39
+
40
+ feats_fake = i3d_model(videos_fake, **i3d_kwargs).cpu().numpy()
41
+ feats_real = i3d_model(videos_real, **i3d_kwargs).cpu().numpy()
42
+
43
+ return compute_fvd(feats_fake, feats_real)
44
+
45
+
46
+ def main():
47
+ # input shape: (b, f, h, w, c)
48
+ videos_fake = torch.rand(10, 16, 224, 224, 3)
49
+ videos_real = torch.rand(10, 16, 224, 224, 3)
50
+
51
+ our_fvd_result = compute_our_fvd(videos_fake, videos_real)
52
+ print(f"[FVD scores] Ours: {our_fvd_result}")
53
+
54
+
55
+ if __name__ == "__main__":
56
+ main()
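
For reference, `compute_fvd` above is the Fréchet distance between Gaussian fits of the I3D features, with the per-set mean and covariance coming from `compute_stats`:

    \mathrm{FVD} = \lVert \mu_{\mathrm{fake}} - \mu_{\mathrm{real}} \rVert_2^2
        + \operatorname{Tr}\!\left( \Sigma_{\mathrm{fake}} + \Sigma_{\mathrm{real}}
        - 2\,(\Sigma_{\mathrm{fake}} \Sigma_{\mathrm{real}})^{1/2} \right)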
eval/hyper_iqa.py ADDED
@@ -0,0 +1,343 @@
1
+ # Adapted from https://github.com/SSL92/hyperIQA/blob/master/models.py
2
+
3
+ import torch as torch
4
+ import torch.nn as nn
5
+ from torch.nn import functional as F
6
+ from torch.nn import init
7
+ import math
8
+ import torch.utils.model_zoo as model_zoo
9
+
10
+ model_urls = {
11
+ 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
12
+ 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
13
+ 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
14
+ 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
15
+ 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
16
+ }
17
+
18
+
19
+ class HyperNet(nn.Module):
20
+ """
21
+ Hyper network for learning perceptual rules.
22
+
23
+ Args:
24
+ lda_out_channels: local distortion aware module output size.
25
+ hyper_in_channels: input feature channels for hyper network.
26
+ target_in_size: input vector size for target network.
27
+ target_fc(i)_size: fully connected layer size of the target network.
28
+ feature_size: input feature map width/height for hyper network.
29
+
30
+ Note:
31
+ For the sizes to match, the input args must satisfy: 'target_fc(i)_size * target_fc(i+1)_size' is divisible by 'feature_size ^ 2'.
32
+
33
+ """
34
+ def __init__(self, lda_out_channels, hyper_in_channels, target_in_size, target_fc1_size, target_fc2_size, target_fc3_size, target_fc4_size, feature_size):
35
+ super(HyperNet, self).__init__()
36
+
37
+ self.hyperInChn = hyper_in_channels
38
+ self.target_in_size = target_in_size
39
+ self.f1 = target_fc1_size
40
+ self.f2 = target_fc2_size
41
+ self.f3 = target_fc3_size
42
+ self.f4 = target_fc4_size
43
+ self.feature_size = feature_size
44
+
45
+ self.res = resnet50_backbone(lda_out_channels, target_in_size, pretrained=True)
46
+
47
+ self.pool = nn.AdaptiveAvgPool2d((1, 1))
48
+
49
+ # Conv layers for resnet output features
50
+ self.conv1 = nn.Sequential(
51
+ nn.Conv2d(2048, 1024, 1, padding=(0, 0)),
52
+ nn.ReLU(inplace=True),
53
+ nn.Conv2d(1024, 512, 1, padding=(0, 0)),
54
+ nn.ReLU(inplace=True),
55
+ nn.Conv2d(512, self.hyperInChn, 1, padding=(0, 0)),
56
+ nn.ReLU(inplace=True)
57
+ )
58
+
59
+ # Hyper network part, conv for generating target fc weights, fc for generating target fc biases
60
+ self.fc1w_conv = nn.Conv2d(self.hyperInChn, int(self.target_in_size * self.f1 / feature_size ** 2), 3, padding=(1, 1))
61
+ self.fc1b_fc = nn.Linear(self.hyperInChn, self.f1)
62
+
63
+ self.fc2w_conv = nn.Conv2d(self.hyperInChn, int(self.f1 * self.f2 / feature_size ** 2), 3, padding=(1, 1))
64
+ self.fc2b_fc = nn.Linear(self.hyperInChn, self.f2)
65
+
66
+ self.fc3w_conv = nn.Conv2d(self.hyperInChn, int(self.f2 * self.f3 / feature_size ** 2), 3, padding=(1, 1))
67
+ self.fc3b_fc = nn.Linear(self.hyperInChn, self.f3)
68
+
69
+ self.fc4w_conv = nn.Conv2d(self.hyperInChn, int(self.f3 * self.f4 / feature_size ** 2), 3, padding=(1, 1))
70
+ self.fc4b_fc = nn.Linear(self.hyperInChn, self.f4)
71
+
72
+ self.fc5w_fc = nn.Linear(self.hyperInChn, self.f4)
73
+ self.fc5b_fc = nn.Linear(self.hyperInChn, 1)
74
+
75
+ # initialize
76
+ for i, m_name in enumerate(self._modules):
77
+ if i > 2:
78
+ nn.init.kaiming_normal_(self._modules[m_name].weight.data)
79
+
80
+ def forward(self, img):
81
+ feature_size = self.feature_size
82
+
83
+ res_out = self.res(img)
84
+
85
+ # input vector for target net
86
+ target_in_vec = res_out['target_in_vec'].reshape(-1, self.target_in_size, 1, 1)
87
+
88
+ # input features for hyper net
89
+ hyper_in_feat = self.conv1(res_out['hyper_in_feat']).reshape(-1, self.hyperInChn, feature_size, feature_size)
90
+
91
+ # generating target net weights & biases
92
+ target_fc1w = self.fc1w_conv(hyper_in_feat).reshape(-1, self.f1, self.target_in_size, 1, 1)
93
+ target_fc1b = self.fc1b_fc(self.pool(hyper_in_feat).squeeze()).reshape(-1, self.f1)
94
+
95
+ target_fc2w = self.fc2w_conv(hyper_in_feat).reshape(-1, self.f2, self.f1, 1, 1)
96
+ target_fc2b = self.fc2b_fc(self.pool(hyper_in_feat).squeeze()).reshape(-1, self.f2)
97
+
98
+ target_fc3w = self.fc3w_conv(hyper_in_feat).reshape(-1, self.f3, self.f2, 1, 1)
99
+ target_fc3b = self.fc3b_fc(self.pool(hyper_in_feat).squeeze()).reshape(-1, self.f3)
100
+
101
+ target_fc4w = self.fc4w_conv(hyper_in_feat).reshape(-1, self.f4, self.f3, 1, 1)
102
+ target_fc4b = self.fc4b_fc(self.pool(hyper_in_feat).squeeze()).reshape(-1, self.f4)
103
+
104
+ target_fc5w = self.fc5w_fc(self.pool(hyper_in_feat).squeeze()).reshape(-1, 1, self.f4, 1, 1)
105
+ target_fc5b = self.fc5b_fc(self.pool(hyper_in_feat).squeeze()).reshape(-1, 1)
106
+
107
+ out = {}
108
+ out['target_in_vec'] = target_in_vec
109
+ out['target_fc1w'] = target_fc1w
110
+ out['target_fc1b'] = target_fc1b
111
+ out['target_fc2w'] = target_fc2w
112
+ out['target_fc2b'] = target_fc2b
113
+ out['target_fc3w'] = target_fc3w
114
+ out['target_fc3b'] = target_fc3b
115
+ out['target_fc4w'] = target_fc4w
116
+ out['target_fc4b'] = target_fc4b
117
+ out['target_fc5w'] = target_fc5w
118
+ out['target_fc5b'] = target_fc5b
119
+
120
+ return out
121
+
122
+
123
+ class TargetNet(nn.Module):
124
+ """
125
+ Target network for quality prediction.
126
+ """
127
+ def __init__(self, paras):
128
+ super(TargetNet, self).__init__()
129
+ self.l1 = nn.Sequential(
130
+ TargetFC(paras['target_fc1w'], paras['target_fc1b']),
131
+ nn.Sigmoid(),
132
+ )
133
+ self.l2 = nn.Sequential(
134
+ TargetFC(paras['target_fc2w'], paras['target_fc2b']),
135
+ nn.Sigmoid(),
136
+ )
137
+
138
+ self.l3 = nn.Sequential(
139
+ TargetFC(paras['target_fc3w'], paras['target_fc3b']),
140
+ nn.Sigmoid(),
141
+ )
142
+
143
+ self.l4 = nn.Sequential(
144
+ TargetFC(paras['target_fc4w'], paras['target_fc4b']),
145
+ nn.Sigmoid(),
146
+ TargetFC(paras['target_fc5w'], paras['target_fc5b']),
147
+ )
148
+
149
+ def forward(self, x):
150
+ q = self.l1(x)
151
+ # q = F.dropout(q)
152
+ q = self.l2(q)
153
+ q = self.l3(q)
154
+ q = self.l4(q).squeeze()
155
+ return q
156
+
157
+
158
+ class TargetFC(nn.Module):
159
+ """
160
+ Fully connected operation for the target net
161
+
162
+ Note:
163
+ Weights & biases are different for different images in a batch,
164
+ thus grouped convolution is used here so that each image in the batch is processed with its own weights & biases.
165
+ """
166
+ def __init__(self, weight, bias):
167
+ super(TargetFC, self).__init__()
168
+ self.weight = weight
169
+ self.bias = bias
170
+
171
+ def forward(self, input_):
172
+
173
+ input_re = input_.reshape(-1, input_.shape[0] * input_.shape[1], input_.shape[2], input_.shape[3])
174
+ weight_re = self.weight.reshape(self.weight.shape[0] * self.weight.shape[1], self.weight.shape[2], self.weight.shape[3], self.weight.shape[4])
175
+ bias_re = self.bias.reshape(self.bias.shape[0] * self.bias.shape[1])
176
+ out = F.conv2d(input=input_re, weight=weight_re, bias=bias_re, groups=self.weight.shape[0])
177
+
178
+ return out.reshape(input_.shape[0], self.weight.shape[1], input_.shape[2], input_.shape[3])
179
+
180
+
181
+ class Bottleneck(nn.Module):
182
+ expansion = 4
183
+
184
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
185
+ super(Bottleneck, self).__init__()
186
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
187
+ self.bn1 = nn.BatchNorm2d(planes)
188
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
189
+ padding=1, bias=False)
190
+ self.bn2 = nn.BatchNorm2d(planes)
191
+ self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
192
+ self.bn3 = nn.BatchNorm2d(planes * 4)
193
+ self.relu = nn.ReLU(inplace=True)
194
+ self.downsample = downsample
195
+ self.stride = stride
196
+
197
+ def forward(self, x):
198
+ residual = x
199
+
200
+ out = self.conv1(x)
201
+ out = self.bn1(out)
202
+ out = self.relu(out)
203
+
204
+ out = self.conv2(out)
205
+ out = self.bn2(out)
206
+ out = self.relu(out)
207
+
208
+ out = self.conv3(out)
209
+ out = self.bn3(out)
210
+
211
+ if self.downsample is not None:
212
+ residual = self.downsample(x)
213
+
214
+ out += residual
215
+ out = self.relu(out)
216
+
217
+ return out
218
+
219
+
220
+ class ResNetBackbone(nn.Module):
221
+
222
+ def __init__(self, lda_out_channels, in_chn, block, layers, num_classes=1000):
223
+ super(ResNetBackbone, self).__init__()
224
+ self.inplanes = 64
225
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
226
+ self.bn1 = nn.BatchNorm2d(64)
227
+ self.relu = nn.ReLU(inplace=True)
228
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
229
+ self.layer1 = self._make_layer(block, 64, layers[0])
230
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
231
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
232
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
233
+
234
+ # local distortion aware module
235
+ self.lda1_pool = nn.Sequential(
236
+ nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False),
237
+ nn.AvgPool2d(7, stride=7),
238
+ )
239
+ self.lda1_fc = nn.Linear(16 * 64, lda_out_channels)
240
+
241
+ self.lda2_pool = nn.Sequential(
242
+ nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False),
243
+ nn.AvgPool2d(7, stride=7),
244
+ )
245
+ self.lda2_fc = nn.Linear(32 * 16, lda_out_channels)
246
+
247
+ self.lda3_pool = nn.Sequential(
248
+ nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False),
249
+ nn.AvgPool2d(7, stride=7),
250
+ )
251
+ self.lda3_fc = nn.Linear(64 * 4, lda_out_channels)
252
+
253
+ self.lda4_pool = nn.AvgPool2d(7, stride=7)
254
+ self.lda4_fc = nn.Linear(2048, in_chn - lda_out_channels * 3)
255
+
256
+ for m in self.modules():
257
+ if isinstance(m, nn.Conv2d):
258
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
259
+ m.weight.data.normal_(0, math.sqrt(2. / n))
260
+ elif isinstance(m, nn.BatchNorm2d):
261
+ m.weight.data.fill_(1)
262
+ m.bias.data.zero_()
263
+
264
+ # initialize
265
+ nn.init.kaiming_normal_(self.lda1_pool._modules['0'].weight.data)
266
+ nn.init.kaiming_normal_(self.lda2_pool._modules['0'].weight.data)
267
+ nn.init.kaiming_normal_(self.lda3_pool._modules['0'].weight.data)
268
+ nn.init.kaiming_normal_(self.lda1_fc.weight.data)
269
+ nn.init.kaiming_normal_(self.lda2_fc.weight.data)
270
+ nn.init.kaiming_normal_(self.lda3_fc.weight.data)
271
+ nn.init.kaiming_normal_(self.lda4_fc.weight.data)
272
+
273
+ def _make_layer(self, block, planes, blocks, stride=1):
274
+ downsample = None
275
+ if stride != 1 or self.inplanes != planes * block.expansion:
276
+ downsample = nn.Sequential(
277
+ nn.Conv2d(self.inplanes, planes * block.expansion,
278
+ kernel_size=1, stride=stride, bias=False),
279
+ nn.BatchNorm2d(planes * block.expansion),
280
+ )
281
+
282
+ layers = []
283
+ layers.append(block(self.inplanes, planes, stride, downsample))
284
+ self.inplanes = planes * block.expansion
285
+ for i in range(1, blocks):
286
+ layers.append(block(self.inplanes, planes))
287
+
288
+ return nn.Sequential(*layers)
289
+
290
+ def forward(self, x):
291
+ x = self.conv1(x)
292
+ x = self.bn1(x)
293
+ x = self.relu(x)
294
+ x = self.maxpool(x)
295
+ x = self.layer1(x)
296
+
297
+ # the same effect as lda operation in the paper, but save much more memory
298
+ lda_1 = self.lda1_fc(self.lda1_pool(x).reshape(x.size(0), -1))
299
+ x = self.layer2(x)
300
+ lda_2 = self.lda2_fc(self.lda2_pool(x).reshape(x.size(0), -1))
301
+ x = self.layer3(x)
302
+ lda_3 = self.lda3_fc(self.lda3_pool(x).reshape(x.size(0), -1))
303
+ x = self.layer4(x)
304
+ lda_4 = self.lda4_fc(self.lda4_pool(x).reshape(x.size(0), -1))
305
+
306
+ vec = torch.cat((lda_1, lda_2, lda_3, lda_4), 1)
307
+
308
+ out = {}
309
+ out['hyper_in_feat'] = x
310
+ out['target_in_vec'] = vec
311
+
312
+ return out
313
+
314
+
315
+ def resnet50_backbone(lda_out_channels, in_chn, pretrained=False, **kwargs):
316
+ """Constructs a ResNet-50 model_hyper.
317
+
318
+ Args:
319
+ pretrained (bool): If True, returns a model_hyper pre-trained on ImageNet
320
+ """
321
+ model = ResNetBackbone(lda_out_channels, in_chn, Bottleneck, [3, 4, 6, 3], **kwargs)
322
+ if pretrained:
323
+ save_model = model_zoo.load_url(model_urls['resnet50'])
324
+ model_dict = model.state_dict()
325
+ state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}
326
+ model_dict.update(state_dict)
327
+ model.load_state_dict(model_dict)
328
+ else:
329
+ model.apply(weights_init_xavier)
330
+ return model
331
+
332
+
333
+ def weights_init_xavier(m):
334
+ classname = m.__class__.__name__
335
+ # print(classname)
336
+ # if isinstance(m, nn.Conv2d):
337
+ if classname.find('Conv') != -1:
338
+ init.kaiming_normal_(m.weight.data)
339
+ elif classname.find('Linear') != -1:
340
+ init.kaiming_normal_(m.weight.data)
341
+ elif classname.find('BatchNorm2d') != -1:
342
+ init.uniform_(m.weight.data, 1.0, 0.02)
343
+ init.constant_(m.bias.data, 0.0)
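
A hedged end-to-end sketch of how the two networks above fit together: `HyperNet` predicts per-image weights and `TargetNet` consumes them to score that same image. The hyper-parameter values below follow the reference hyperIQA setup and satisfy the divisibility note in the `HyperNet` docstring (e.g. 224 * 112 / 7^2 = 512); the backbone downloads ImageNet ResNet-50 weights on first use, and the random tensor only stands in for a normalized 224x224 crop.

    import torch
    from eval.hyper_iqa import HyperNet, TargetNet

    # (lda_out_channels, hyper_in_channels, target_in_size, fc1..fc4 sizes, feature_size)
    model_hyper = HyperNet(16, 112, 224, 112, 56, 28, 14, 7).eval()

    img = torch.rand(1, 3, 224, 224)                  # placeholder for a normalized face crop
    with torch.no_grad():
        paras = model_hyper(img)                      # per-image weights & biases for the target net
        model_target = TargetNet(paras).eval()
        score = model_target(paras["target_in_vec"])  # scalar quality prediction
    print(float(score))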
eval/inference_videos.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import subprocess
17
+ from tqdm import tqdm
18
+
19
+
20
+ def inference_video_from_dir(input_dir, output_dir, unet_config_path, ckpt_path):
21
+ os.makedirs(output_dir, exist_ok=True)
22
+ video_names = sorted([f for f in os.listdir(input_dir) if f.endswith(".mp4")])
23
+ for video_name in tqdm(video_names):
24
+ video_path = os.path.join(input_dir, video_name)
25
+ audio_path = os.path.join(input_dir, video_name.replace(".mp4", "_audio.wav"))
26
+ video_out_path = os.path.join(output_dir, video_name.replace(".mp4", "_out.mp4"))
27
+ inference_command = f"python inference.py --unet_config_path {unet_config_path} --video_path {video_path} --audio_path {audio_path} --video_out_path {video_out_path} --inference_ckpt_path {ckpt_path} --seed 1247"
28
+ subprocess.run(inference_command, shell=True)
29
+
30
+
31
+ if __name__ == "__main__":
32
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/HDTF/segmented/cross"
33
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/HDTF/segmented/latentsync_cross"
34
+ unet_config_path = "configs/unet/unet_latent_16_diffusion.yaml"
35
+ ckpt_path = "output/unet/train-2024_10_08-16:23:43/checkpoints/checkpoint-1920000.pt"
36
+
37
+ inference_video_from_dir(input_dir, output_dir, unet_config_path, ckpt_path)
eval/syncnet/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .syncnet_eval import SyncNetEval
eval/syncnet/syncnet.py ADDED
@@ -0,0 +1,113 @@
1
+ # https://github.com/joonson/syncnet_python/blob/master/SyncNetModel.py
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+ def save(model, filename):
8
+ with open(filename, "wb") as f:
9
+ torch.save(model, f)
10
+ print("%s saved." % filename)
11
+
12
+
13
+ def load(filename):
14
+ net = torch.load(filename)
15
+ return net
16
+
17
+
18
+ class S(nn.Module):
19
+ def __init__(self, num_layers_in_fc_layers=1024):
20
+ super(S, self).__init__()
21
+
22
+ self.__nFeatures__ = 24
23
+ self.__nChs__ = 32
24
+ self.__midChs__ = 32
25
+
26
+ self.netcnnaud = nn.Sequential(
27
+ nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
28
+ nn.BatchNorm2d(64),
29
+ nn.ReLU(inplace=True),
30
+ nn.MaxPool2d(kernel_size=(1, 1), stride=(1, 1)),
31
+ nn.Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
32
+ nn.BatchNorm2d(192),
33
+ nn.ReLU(inplace=True),
34
+ nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 2)),
35
+ nn.Conv2d(192, 384, kernel_size=(3, 3), padding=(1, 1)),
36
+ nn.BatchNorm2d(384),
37
+ nn.ReLU(inplace=True),
38
+ nn.Conv2d(384, 256, kernel_size=(3, 3), padding=(1, 1)),
39
+ nn.BatchNorm2d(256),
40
+ nn.ReLU(inplace=True),
41
+ nn.Conv2d(256, 256, kernel_size=(3, 3), padding=(1, 1)),
42
+ nn.BatchNorm2d(256),
43
+ nn.ReLU(inplace=True),
44
+ nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)),
45
+ nn.Conv2d(256, 512, kernel_size=(5, 4), padding=(0, 0)),
46
+ nn.BatchNorm2d(512),
47
+ nn.ReLU(),
48
+ )
49
+
50
+ self.netfcaud = nn.Sequential(
51
+ nn.Linear(512, 512),
52
+ nn.BatchNorm1d(512),
53
+ nn.ReLU(),
54
+ nn.Linear(512, num_layers_in_fc_layers),
55
+ )
56
+
57
+ self.netfclip = nn.Sequential(
58
+ nn.Linear(512, 512),
59
+ nn.BatchNorm1d(512),
60
+ nn.ReLU(),
61
+ nn.Linear(512, num_layers_in_fc_layers),
62
+ )
63
+
64
+ self.netcnnlip = nn.Sequential(
65
+ nn.Conv3d(3, 96, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=0),
66
+ nn.BatchNorm3d(96),
67
+ nn.ReLU(inplace=True),
68
+ nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2)),
69
+ nn.Conv3d(96, 256, kernel_size=(1, 5, 5), stride=(1, 2, 2), padding=(0, 1, 1)),
70
+ nn.BatchNorm3d(256),
71
+ nn.ReLU(inplace=True),
72
+ nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
73
+ nn.Conv3d(256, 256, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
74
+ nn.BatchNorm3d(256),
75
+ nn.ReLU(inplace=True),
76
+ nn.Conv3d(256, 256, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
77
+ nn.BatchNorm3d(256),
78
+ nn.ReLU(inplace=True),
79
+ nn.Conv3d(256, 256, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
80
+ nn.BatchNorm3d(256),
81
+ nn.ReLU(inplace=True),
82
+ nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2)),
83
+ nn.Conv3d(256, 512, kernel_size=(1, 6, 6), padding=0),
84
+ nn.BatchNorm3d(512),
85
+ nn.ReLU(inplace=True),
86
+ )
87
+
88
+ def forward_aud(self, x):
89
+
90
+ mid = self.netcnnaud(x)
91
+ # N x ch x 24 x M
92
+ mid = mid.view((mid.size()[0], -1))
93
+ # N x (ch x 24)
94
+ out = self.netfcaud(mid)
95
+
96
+ return out
97
+
98
+ def forward_lip(self, x):
99
+
100
+ mid = self.netcnnlip(x)
101
+ mid = mid.view((mid.size()[0], -1))
102
+ # N x (ch x 24)
103
+ out = self.netfclip(mid)
104
+
105
+ return out
106
+
107
+ def forward_lipfeat(self, x):
108
+
109
+ mid = self.netcnnlip(x)
110
+ out = mid.view((mid.size()[0], -1))
111
+ # N x (ch x 24)
112
+
113
+ return out
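
A quick shape check for the two towers above; the input sizes (five RGB frames at 224x224 for the lip stream, a 13x20 MFCC window for the audio stream) mirror how `syncnet_eval.py` slices its batches and are assumptions of this sketch rather than requirements stated in this file.

    import torch
    from eval.syncnet.syncnet import S

    model = S(num_layers_in_fc_layers=1024).eval()  # random weights are fine for a shape check

    lip_clip = torch.rand(2, 3, 5, 224, 224)        # (batch, channels, 5 frames, H, W)
    mfcc_win = torch.rand(2, 1, 13, 20)             # (batch, 1, 13 MFCC coeffs, 20 steps ~ 0.2 s)

    with torch.no_grad():
        lip_embed = model.forward_lip(lip_clip)     # -> (2, 1024)
        aud_embed = model.forward_aud(mfcc_win)     # -> (2, 1024)
    print(lip_embed.shape, aud_embed.shape)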
eval/syncnet/syncnet_eval.py ADDED
@@ -0,0 +1,220 @@
1
+ # Adapted from https://github.com/joonson/syncnet_python/blob/master/SyncNetInstance.py
2
+
3
+ import torch
4
+ import numpy
5
+ import time, pdb, argparse, subprocess, os, math, glob
6
+ import cv2
7
+ import python_speech_features
8
+
9
+ from scipy import signal
10
+ from scipy.io import wavfile
11
+ from .syncnet import S
12
+ from shutil import rmtree
13
+
14
+
15
+ # ==================== Get OFFSET ====================
16
+
17
+ # Video 25 FPS, Audio 16000HZ
18
+
19
+
20
+ def calc_pdist(feat1, feat2, vshift=10):
21
+ win_size = vshift * 2 + 1
22
+
23
+ feat2p = torch.nn.functional.pad(feat2, (0, 0, vshift, vshift))
24
+
25
+ dists = []
26
+
27
+ for i in range(0, len(feat1)):
28
+
29
+ dists.append(
30
+ torch.nn.functional.pairwise_distance(feat1[[i], :].repeat(win_size, 1), feat2p[i : i + win_size, :])
31
+ )
32
+
33
+ return dists
34
+
35
+
36
+ # ==================== MAIN DEF ====================
37
+
38
+
39
+ class SyncNetEval(torch.nn.Module):
40
+ def __init__(self, dropout=0, num_layers_in_fc_layers=1024, device="cpu"):
41
+ super().__init__()
42
+
43
+ self.__S__ = S(num_layers_in_fc_layers=num_layers_in_fc_layers).to(device)
44
+ self.device = device
45
+
46
+ def evaluate(self, video_path, temp_dir="temp", batch_size=20, vshift=15):
47
+
48
+ self.__S__.eval()
49
+
50
+ # ========== ==========
51
+ # Convert files
52
+ # ========== ==========
53
+
54
+ if os.path.exists(temp_dir):
55
+ rmtree(temp_dir)
56
+
57
+ os.makedirs(temp_dir)
58
+
59
+ # temp_video_path = os.path.join(temp_dir, "temp.mp4")
60
+ # command = f"ffmpeg -loglevel error -nostdin -y -i {video_path} -vf scale='224:224' {temp_video_path}"
61
+ # subprocess.call(command, shell=True)
62
+
63
+ command = (
64
+ f"ffmpeg -loglevel error -nostdin -y -i {video_path} -f image2 {os.path.join(temp_dir, '%06d.jpg')}"
65
+ )
66
+ subprocess.call(command, shell=True, stdout=None)
67
+
68
+ command = f"ffmpeg -loglevel error -nostdin -y -i {video_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {os.path.join(temp_dir, 'audio.wav')}"
69
+ subprocess.call(command, shell=True, stdout=None)
70
+
71
+ # ========== ==========
72
+ # Load video
73
+ # ========== ==========
74
+
75
+ images = []
76
+
77
+ flist = glob.glob(os.path.join(temp_dir, "*.jpg"))
78
+ flist.sort()
79
+
80
+ for fname in flist:
81
+ img_input = cv2.imread(fname)
82
+ img_input = cv2.resize(img_input, (224, 224)) # HARD CODED, CHANGE BEFORE RELEASE
83
+ images.append(img_input)
84
+
85
+ im = numpy.stack(images, axis=3)
86
+ im = numpy.expand_dims(im, axis=0)
87
+ im = numpy.transpose(im, (0, 3, 4, 1, 2))
88
+
89
+ imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
90
+
91
+ # ========== ==========
92
+ # Load audio
93
+ # ========== ==========
94
+
95
+ sample_rate, audio = wavfile.read(os.path.join(temp_dir, "audio.wav"))
96
+ mfcc = zip(*python_speech_features.mfcc(audio, sample_rate))
97
+ mfcc = numpy.stack([numpy.array(i) for i in mfcc])
98
+
99
+ cc = numpy.expand_dims(numpy.expand_dims(mfcc, axis=0), axis=0)
100
+ cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())
101
+
102
+ # ========== ==========
103
+ # Check audio and video input length
104
+ # ========== ==========
105
+
106
+ # if (float(len(audio)) / 16000) != (float(len(images)) / 25):
107
+ # print(
108
+ # "WARNING: Audio (%.4fs) and video (%.4fs) lengths are different."
109
+ # % (float(len(audio)) / 16000, float(len(images)) / 25)
110
+ # )
111
+
112
+ min_length = min(len(images), math.floor(len(audio) / 640))
113
+
114
+ # ========== ==========
115
+ # Generate video and audio feats
116
+ # ========== ==========
117
+
118
+ lastframe = min_length - 5
119
+ im_feat = []
120
+ cc_feat = []
121
+
122
+ tS = time.time()
123
+ for i in range(0, lastframe, batch_size):
124
+
125
+ im_batch = [imtv[:, :, vframe : vframe + 5, :, :] for vframe in range(i, min(lastframe, i + batch_size))]
126
+ im_in = torch.cat(im_batch, 0)
127
+ im_out = self.__S__.forward_lip(im_in.to(self.device))
128
+ im_feat.append(im_out.data.cpu())
129
+
130
+ cc_batch = [
131
+ cct[:, :, :, vframe * 4 : vframe * 4 + 20] for vframe in range(i, min(lastframe, i + batch_size))
132
+ ]
133
+ cc_in = torch.cat(cc_batch, 0)
134
+ cc_out = self.__S__.forward_aud(cc_in.to(self.device))
135
+ cc_feat.append(cc_out.data.cpu())
136
+
137
+ im_feat = torch.cat(im_feat, 0)
138
+ cc_feat = torch.cat(cc_feat, 0)
139
+
140
+ # ========== ==========
141
+ # Compute offset
142
+ # ========== ==========
143
+
144
+ dists = calc_pdist(im_feat, cc_feat, vshift=vshift)
145
+ mean_dists = torch.mean(torch.stack(dists, 1), 1)
146
+
147
+ min_dist, minidx = torch.min(mean_dists, 0)
148
+
149
+ av_offset = vshift - minidx
150
+ conf = torch.median(mean_dists) - min_dist
151
+
152
+ fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
153
+ # fdist = numpy.pad(fdist, (3,3), 'constant', constant_values=15)
154
+ fconf = torch.median(mean_dists).numpy() - fdist
155
+ framewise_conf = signal.medfilt(fconf, kernel_size=9)
156
+
157
+ # numpy.set_printoptions(formatter={"float": "{: 0.3f}".format})
158
+ rmtree(temp_dir)
159
+ return av_offset.item(), min_dist.item(), conf.item()
160
+
161
+ def extract_feature(self, opt, videofile):
162
+
163
+ self.__S__.eval()
164
+
165
+ # ========== ==========
166
+ # Load video
167
+ # ========== ==========
168
+ cap = cv2.VideoCapture(videofile)
169
+
170
+ frame_num = 1
171
+ images = []
172
+ while frame_num:
173
+ frame_num += 1
174
+ ret, image = cap.read()
175
+ if ret == 0:
176
+ break
177
+
178
+ images.append(image)
179
+
180
+ im = numpy.stack(images, axis=3)
181
+ im = numpy.expand_dims(im, axis=0)
182
+ im = numpy.transpose(im, (0, 3, 4, 1, 2))
183
+
184
+ imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
185
+
186
+ # ========== ==========
187
+ # Generate video feats
188
+ # ========== ==========
189
+
190
+ lastframe = len(images) - 4
191
+ im_feat = []
192
+
193
+ tS = time.time()
194
+ for i in range(0, lastframe, opt.batch_size):
195
+
196
+ im_batch = [
197
+ imtv[:, :, vframe : vframe + 5, :, :] for vframe in range(i, min(lastframe, i + opt.batch_size))
198
+ ]
199
+ im_in = torch.cat(im_batch, 0)
200
+ im_out = self.__S__.forward_lipfeat(im_in.to(self.device))
201
+ im_feat.append(im_out.data.cpu())
202
+
203
+ im_feat = torch.cat(im_feat, 0)
204
+
205
+ # ========== ==========
206
+ # Compute offset
207
+ # ========== ==========
208
+
209
+ print("Compute time %.3f sec." % (time.time() - tS))
210
+
211
+ return im_feat
212
+
213
+ def loadParameters(self, path):
214
+ loaded_state = torch.load(path, map_location=lambda storage, loc: storage)
215
+
216
+ self_state = self.__S__.state_dict()
217
+
218
+ for name, param in loaded_state.items():
219
+
220
+ self_state[name].copy_(param)
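The offset and confidence computed above follow the usual SyncNet convention: the AV offset is the candidate shift with the smallest mean feature distance, and the confidence is how far that minimum falls below the median distance. A toy illustration of that logic with made-up distances (vshift = 2):

import torch

mean_dists = torch.tensor([9.0, 7.5, 4.0, 8.0, 9.5])   # one mean distance per shift in [-2, ..., +2]
min_dist, minidx = torch.min(mean_dists, 0)
av_offset = 2 - minidx.item()                           # 2 - 2 = 0 frames, i.e. already in sync
conf = (torch.median(mean_dists) - min_dist).item()     # 8.0 - 4.0 = 4.0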
eval/syncnet_detect.py ADDED
@@ -0,0 +1,251 @@
1
+ # Adapted from https://github.com/joonson/syncnet_python/blob/master/run_pipeline.py
2
+
3
+ import os, pdb, subprocess, glob, cv2
4
+ import numpy as np
5
+ from shutil import rmtree
6
+ import torch
7
+
8
+ from scenedetect.video_manager import VideoManager
9
+ from scenedetect.scene_manager import SceneManager
10
+ from scenedetect.stats_manager import StatsManager
11
+ from scenedetect.detectors import ContentDetector
12
+
13
+ from scipy.interpolate import interp1d
14
+ from scipy.io import wavfile
15
+ from scipy import signal
16
+
17
+ from eval.detectors import S3FD
18
+
19
+
20
+ class SyncNetDetector:
21
+ def __init__(self, device, detect_results_dir="detect_results"):
22
+ self.s3f_detector = S3FD(device=device)
23
+ self.detect_results_dir = detect_results_dir
24
+
25
+ def __call__(self, video_path: str, min_track=50, scale=False):
26
+ crop_dir = os.path.join(self.detect_results_dir, "crop")
27
+ video_dir = os.path.join(self.detect_results_dir, "video")
28
+ frames_dir = os.path.join(self.detect_results_dir, "frames")
29
+ temp_dir = os.path.join(self.detect_results_dir, "temp")
30
+
31
+ # ========== DELETE EXISTING DIRECTORIES ==========
32
+ if os.path.exists(crop_dir):
33
+ rmtree(crop_dir)
34
+
35
+ if os.path.exists(video_dir):
36
+ rmtree(video_dir)
37
+
38
+ if os.path.exists(frames_dir):
39
+ rmtree(frames_dir)
40
+
41
+ if os.path.exists(temp_dir):
42
+ rmtree(temp_dir)
43
+
44
+ # ========== MAKE NEW DIRECTORIES ==========
45
+
46
+ os.makedirs(crop_dir)
47
+ os.makedirs(video_dir)
48
+ os.makedirs(frames_dir)
49
+ os.makedirs(temp_dir)
50
+
51
+ # ========== CONVERT VIDEO AND EXTRACT FRAMES ==========
52
+
53
+ if scale:
54
+ scaled_video_path = os.path.join(video_dir, "scaled.mp4")
55
+ command = f"ffmpeg -loglevel error -y -nostdin -i {video_path} -vf scale='224:224' {scaled_video_path}"
56
+ subprocess.run(command, shell=True)
57
+ video_path = scaled_video_path
58
+
59
+ command = f"ffmpeg -y -nostdin -loglevel error -i {video_path} -qscale:v 2 -async 1 -r 25 {os.path.join(video_dir, 'video.mp4')}"
60
+ subprocess.run(command, shell=True, stdout=None)
61
+
62
+ command = f"ffmpeg -y -nostdin -loglevel error -i {os.path.join(video_dir, 'video.mp4')} -qscale:v 2 -f image2 {os.path.join(frames_dir, '%06d.jpg')}"
63
+ subprocess.run(command, shell=True, stdout=None)
64
+
65
+ command = f"ffmpeg -y -nostdin -loglevel error -i {os.path.join(video_dir, 'video.mp4')} -ac 1 -vn -acodec pcm_s16le -ar 16000 {os.path.join(video_dir, 'audio.wav')}"
66
+ subprocess.run(command, shell=True, stdout=None)
67
+
68
+ faces = self.detect_face(frames_dir)
69
+
70
+ scene = self.scene_detect(video_dir)
71
+
72
+ # Face tracking
73
+ alltracks = []
74
+
75
+ for shot in scene:
76
+ if shot[1].frame_num - shot[0].frame_num >= min_track:
77
+ alltracks.extend(self.track_face(faces[shot[0].frame_num : shot[1].frame_num], min_track=min_track))
78
+
79
+ # Face crop
80
+ for ii, track in enumerate(alltracks):
81
+ self.crop_video(track, os.path.join(crop_dir, "%05d" % ii), frames_dir, 25, temp_dir, video_dir)
82
+
83
+ rmtree(temp_dir)
84
+
85
+ def scene_detect(self, video_dir):
86
+ video_manager = VideoManager([os.path.join(video_dir, "video.mp4")])
87
+ stats_manager = StatsManager()
88
+ scene_manager = SceneManager(stats_manager)
89
+ # Add ContentDetector algorithm (constructor takes detector options like threshold).
90
+ scene_manager.add_detector(ContentDetector())
91
+ base_timecode = video_manager.get_base_timecode()
92
+
93
+ video_manager.set_downscale_factor()
94
+
95
+ video_manager.start()
96
+
97
+ scene_manager.detect_scenes(frame_source=video_manager)
98
+
99
+ scene_list = scene_manager.get_scene_list(base_timecode)
100
+
101
+ if scene_list == []:
102
+ scene_list = [(video_manager.get_base_timecode(), video_manager.get_current_timecode())]
103
+
104
+ return scene_list
105
+
106
+ def track_face(self, scenefaces, num_failed_det=25, min_track=50, min_face_size=100):
107
+
108
+ iouThres = 0.5 # Minimum IOU between consecutive face detections
109
+ tracks = []
110
+
111
+ while True:
112
+ track = []
113
+ for framefaces in scenefaces:
114
+ for face in framefaces:
115
+ if track == []:
116
+ track.append(face)
117
+ framefaces.remove(face)
118
+ elif face["frame"] - track[-1]["frame"] <= num_failed_det:
119
+ iou = bounding_box_iou(face["bbox"], track[-1]["bbox"])
120
+ if iou > iouThres:
121
+ track.append(face)
122
+ framefaces.remove(face)
123
+ continue
124
+ else:
125
+ break
126
+
127
+ if track == []:
128
+ break
129
+ elif len(track) > min_track:
130
+
131
+ framenum = np.array([f["frame"] for f in track])
132
+ bboxes = np.array([np.array(f["bbox"]) for f in track])
133
+
134
+ frame_i = np.arange(framenum[0], framenum[-1] + 1)
135
+
136
+ bboxes_i = []
137
+ for ij in range(0, 4):
138
+ interpfn = interp1d(framenum, bboxes[:, ij])
139
+ bboxes_i.append(interpfn(frame_i))
140
+ bboxes_i = np.stack(bboxes_i, axis=1)
141
+
142
+ if (
143
+ max(np.mean(bboxes_i[:, 2] - bboxes_i[:, 0]), np.mean(bboxes_i[:, 3] - bboxes_i[:, 1]))
144
+ > min_face_size
145
+ ):
146
+ tracks.append({"frame": frame_i, "bbox": bboxes_i})
147
+
148
+ return tracks
149
+
150
+ def detect_face(self, frames_dir, facedet_scale=0.25):
151
+ flist = glob.glob(os.path.join(frames_dir, "*.jpg"))
152
+ flist.sort()
153
+
154
+ dets = []
155
+
156
+ for fidx, fname in enumerate(flist):
157
+ image = cv2.imread(fname)
158
+
159
+ image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
160
+ bboxes = self.s3f_detector.detect_faces(image_np, conf_th=0.9, scales=[facedet_scale])
161
+
162
+ dets.append([])
163
+ for bbox in bboxes:
164
+ dets[-1].append({"frame": fidx, "bbox": (bbox[:-1]).tolist(), "conf": bbox[-1]})
165
+
166
+ return dets
167
+
168
+ def crop_video(self, track, cropfile, frames_dir, frame_rate, temp_dir, video_dir, crop_scale=0.4):
169
+
170
+ flist = glob.glob(os.path.join(frames_dir, "*.jpg"))
171
+ flist.sort()
172
+
173
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
174
+ vOut = cv2.VideoWriter(cropfile + "t.mp4", fourcc, frame_rate, (224, 224))
175
+
176
+ dets = {"x": [], "y": [], "s": []}
177
+
178
+ for det in track["bbox"]:
179
+
180
+ dets["s"].append(max((det[3] - det[1]), (det[2] - det[0])) / 2)
181
+ dets["y"].append((det[1] + det[3]) / 2) # crop center x
182
+ dets["x"].append((det[0] + det[2]) / 2) # crop center y
183
+
184
+ # Smooth detections
185
+ dets["s"] = signal.medfilt(dets["s"], kernel_size=13)
186
+ dets["x"] = signal.medfilt(dets["x"], kernel_size=13)
187
+ dets["y"] = signal.medfilt(dets["y"], kernel_size=13)
188
+
189
+ for fidx, frame in enumerate(track["frame"]):
190
+
191
+ cs = crop_scale
192
+
193
+ bs = dets["s"][fidx] # Detection box size
194
+ bsi = int(bs * (1 + 2 * cs)) # Pad videos by this amount
195
+
196
+ image = cv2.imread(flist[frame])
197
+
198
+ frame = np.pad(image, ((bsi, bsi), (bsi, bsi), (0, 0)), "constant", constant_values=(110, 110))
199
+ my = dets["y"][fidx] + bsi # BBox center Y
200
+ mx = dets["x"][fidx] + bsi # BBox center X
201
+
202
+ face = frame[int(my - bs) : int(my + bs * (1 + 2 * cs)), int(mx - bs * (1 + cs)) : int(mx + bs * (1 + cs))]
203
+
204
+ vOut.write(cv2.resize(face, (224, 224)))
205
+
206
+ audiotmp = os.path.join(temp_dir, "audio.wav")
207
+ audiostart = (track["frame"][0]) / frame_rate
208
+ audioend = (track["frame"][-1] + 1) / frame_rate
209
+
210
+ vOut.release()
211
+
212
+ # ========== CROP AUDIO FILE ==========
213
+
214
+ command = "ffmpeg -y -nostdin -loglevel error -i %s -ss %.3f -to %.3f %s" % (
215
+ os.path.join(video_dir, "audio.wav"),
216
+ audiostart,
217
+ audioend,
218
+ audiotmp,
219
+ )
220
+ output = subprocess.run(command, shell=True, stdout=None)
221
+
222
+ sample_rate, audio = wavfile.read(audiotmp)
223
+
224
+ # ========== COMBINE AUDIO AND VIDEO FILES ==========
225
+
226
+ command = "ffmpeg -y -nostdin -loglevel error -i %st.mp4 -i %s -c:v copy -c:a aac %s.mp4" % (
227
+ cropfile,
228
+ audiotmp,
229
+ cropfile,
230
+ )
231
+ output = subprocess.run(command, shell=True, stdout=None)
232
+
233
+ os.remove(cropfile + "t.mp4")
234
+
235
+ return {"track": track, "proc_track": dets}
236
+
237
+
238
+ def bounding_box_iou(boxA, boxB):
239
+ xA = max(boxA[0], boxB[0])
240
+ yA = max(boxA[1], boxB[1])
241
+ xB = min(boxA[2], boxB[2])
242
+ yB = min(boxA[3], boxB[3])
243
+
244
+ interArea = max(0, xB - xA) * max(0, yB - yA)
245
+
246
+ boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
247
+ boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
248
+
249
+ iou = interArea / float(boxAArea + boxBArea - interArea)
250
+
251
+ return iou
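bounding_box_iou is what track_face uses to link detections across frames (the IoU must exceed 0.5). A quick sanity check with two hypothetical boxes in (x1, y1, x2, y2) format:

boxA = [0, 0, 10, 10]
boxB = [5, 5, 15, 15]
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175
print(bounding_box_iou(boxA, boxB))  # ~0.143, too low to be linked into the same face track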
inference.sh ADDED
@@ -0,0 +1,9 @@
1
+ #!/bin/bash
2
+
3
+ python -m scripts.inference \
4
+ --unet_config_path "configs/unet/second_stage.yaml" \
5
+ --inference_ckpt_path "checkpoints/latentsync_unet.pt" \
6
+ --guidance_scale 1.0 \
7
+ --video_path "assets/demo1_video.mp4" \
8
+ --audio_path "assets/demo1_audio.wav" \
9
+ --video_out_path "video_out.mp4"
pipelines/lipsync_pipeline.py ADDED
@@ -0,0 +1,470 @@
1
+ # Adapted from https://github.com/guoyww/AnimateDiff/blob/main/animatediff/pipelines/pipeline_animation.py
2
+
3
+ import inspect
4
+ import os
5
+ import shutil
6
+ from typing import Callable, List, Optional, Union
7
+ import subprocess
8
+
9
+ import numpy as np
10
+ import torch
11
+ import torchvision
12
+
13
+ from diffusers.utils import is_accelerate_available
14
+ from packaging import version
15
+
16
+ from diffusers.configuration_utils import FrozenDict
17
+ from diffusers.models import AutoencoderKL
18
+ from diffusers.pipeline_utils import DiffusionPipeline
19
+ from diffusers.schedulers import (
20
+ DDIMScheduler,
21
+ DPMSolverMultistepScheduler,
22
+ EulerAncestralDiscreteScheduler,
23
+ EulerDiscreteScheduler,
24
+ LMSDiscreteScheduler,
25
+ PNDMScheduler,
26
+ )
27
+ from diffusers.utils import deprecate, logging
28
+
29
+ from einops import rearrange
30
+
31
+ from ..models.unet import UNet3DConditionModel
32
+ from ..utils.image_processor import ImageProcessor
33
+ from ..utils.util import read_video, read_audio, write_video
34
+ from ..whisper.audio2feature import Audio2Feature
35
+ import tqdm
36
+ import soundfile as sf
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+ class LipsyncPipeline(DiffusionPipeline):
42
+ _optional_components = []
43
+
44
+ def __init__(
45
+ self,
46
+ vae: AutoencoderKL,
47
+ audio_encoder: Audio2Feature,
48
+ unet: UNet3DConditionModel,
49
+ scheduler: Union[
50
+ DDIMScheduler,
51
+ PNDMScheduler,
52
+ LMSDiscreteScheduler,
53
+ EulerDiscreteScheduler,
54
+ EulerAncestralDiscreteScheduler,
55
+ DPMSolverMultistepScheduler,
56
+ ],
57
+ ):
58
+ super().__init__()
59
+
60
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
61
+ deprecation_message = (
62
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
63
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
64
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
65
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
66
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
67
+ " file"
68
+ )
69
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
70
+ new_config = dict(scheduler.config)
71
+ new_config["steps_offset"] = 1
72
+ scheduler._internal_dict = FrozenDict(new_config)
73
+
74
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
75
+ deprecation_message = (
76
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
77
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
78
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
79
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
80
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
81
+ )
82
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
83
+ new_config = dict(scheduler.config)
84
+ new_config["clip_sample"] = False
85
+ scheduler._internal_dict = FrozenDict(new_config)
86
+
87
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
88
+ version.parse(unet.config._diffusers_version).base_version
89
+ ) < version.parse("0.9.0.dev0")
90
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
91
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
92
+ deprecation_message = (
93
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
94
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
95
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
96
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
97
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
98
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
99
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
100
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
101
+ " the `unet/config.json` file"
102
+ )
103
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
104
+ new_config = dict(unet.config)
105
+ new_config["sample_size"] = 64
106
+ unet._internal_dict = FrozenDict(new_config)
107
+
108
+ self.register_modules(
109
+ vae=vae,
110
+ audio_encoder=audio_encoder,
111
+ unet=unet,
112
+ scheduler=scheduler,
113
+ )
114
+
115
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
116
+
117
+ self.set_progress_bar_config(desc="Steps")
118
+
119
+ def enable_vae_slicing(self):
120
+ self.vae.enable_slicing()
121
+
122
+ def disable_vae_slicing(self):
123
+ self.vae.disable_slicing()
124
+
125
+ def enable_sequential_cpu_offload(self, gpu_id=0):
126
+ if is_accelerate_available():
127
+ from accelerate import cpu_offload
128
+ else:
129
+ raise ImportError("Please install accelerate via `pip install accelerate`")
130
+
131
+ device = torch.device(f"cuda:{gpu_id}")
132
+
133
+ for cpu_offloaded_model in [self.unet, self.vae]: # this pipeline registers no text encoder
134
+ if cpu_offloaded_model is not None:
135
+ cpu_offload(cpu_offloaded_model, device)
136
+
137
+ @property
138
+ def _execution_device(self):
139
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
140
+ return self.device
141
+ for module in self.unet.modules():
142
+ if (
143
+ hasattr(module, "_hf_hook")
144
+ and hasattr(module._hf_hook, "execution_device")
145
+ and module._hf_hook.execution_device is not None
146
+ ):
147
+ return torch.device(module._hf_hook.execution_device)
148
+ return self.device
149
+
150
+ def decode_latents(self, latents):
151
+ latents = latents / self.vae.config.scaling_factor + self.vae.config.shift_factor
152
+ latents = rearrange(latents, "b c f h w -> (b f) c h w")
153
+ decoded_latents = self.vae.decode(latents).sample
154
+ return decoded_latents
155
+
156
+ def prepare_extra_step_kwargs(self, generator, eta):
157
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
158
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
159
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
160
+ # and should be between [0, 1]
161
+
162
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
163
+ extra_step_kwargs = {}
164
+ if accepts_eta:
165
+ extra_step_kwargs["eta"] = eta
166
+
167
+ # check if the scheduler accepts generator
168
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
169
+ if accepts_generator:
170
+ extra_step_kwargs["generator"] = generator
171
+ return extra_step_kwargs
172
+
173
+ def check_inputs(self, height, width, callback_steps):
174
+ assert height == width, "Height and width must be equal"
175
+
176
+ if height % 8 != 0 or width % 8 != 0:
177
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
178
+
179
+ if (callback_steps is None) or (
180
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
181
+ ):
182
+ raise ValueError(
183
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
184
+ f" {type(callback_steps)}."
185
+ )
186
+
187
+ def prepare_latents(self, batch_size, num_frames, num_channels_latents, height, width, dtype, device, generator):
188
+ shape = (
189
+ batch_size,
190
+ num_channels_latents,
191
+ 1,
192
+ height // self.vae_scale_factor,
193
+ width // self.vae_scale_factor,
194
+ )
195
+ rand_device = "cpu" if device.type == "mps" else device
196
+ latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
197
+ latents = latents.repeat(1, 1, num_frames, 1, 1)
198
+
199
+ # scale the initial noise by the standard deviation required by the scheduler
200
+ latents = latents * self.scheduler.init_noise_sigma
201
+ return latents
202
+
203
+ def prepare_mask_latents(
204
+ self, mask, masked_image, height, width, dtype, device, generator, do_classifier_free_guidance
205
+ ):
206
+ # resize the mask to latents shape as we concatenate the mask to the latents
207
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
208
+ # and half precision
209
+ mask = torch.nn.functional.interpolate(
210
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
211
+ )
212
+ masked_image = masked_image.to(device=device, dtype=dtype)
213
+
214
+ # encode the mask image into latents space so we can concatenate it to the latents
215
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
216
+ masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
217
+
218
+ # aligning device to prevent device errors when concatenating it with the latent model input
219
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
220
+ mask = mask.to(device=device, dtype=dtype)
221
+
222
+ # assume batch size = 1
223
+ mask = rearrange(mask, "f c h w -> 1 c f h w")
224
+ masked_image_latents = rearrange(masked_image_latents, "f c h w -> 1 c f h w")
225
+
226
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
227
+ masked_image_latents = (
228
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
229
+ )
230
+ return mask, masked_image_latents
231
+
232
+ def prepare_image_latents(self, images, device, dtype, generator, do_classifier_free_guidance):
233
+ images = images.to(device=device, dtype=dtype)
234
+ image_latents = self.vae.encode(images).latent_dist.sample(generator=generator)
235
+ image_latents = (image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
236
+ image_latents = rearrange(image_latents, "f c h w -> 1 c f h w")
237
+ image_latents = torch.cat([image_latents] * 2) if do_classifier_free_guidance else image_latents
238
+
239
+ return image_latents
240
+
241
+ def set_progress_bar_config(self, **kwargs):
242
+ if not hasattr(self, "_progress_bar_config"):
243
+ self._progress_bar_config = {}
244
+ self._progress_bar_config.update(kwargs)
245
+
246
+ @staticmethod
247
+ def paste_surrounding_pixels_back(decoded_latents, pixel_values, masks, device, weight_dtype):
248
+ # Paste the surrounding pixels back, because we only want to change the mouth region
249
+ pixel_values = pixel_values.to(device=device, dtype=weight_dtype)
250
+ masks = masks.to(device=device, dtype=weight_dtype)
251
+ combined_pixel_values = decoded_latents * masks + pixel_values * (1 - masks)
252
+ return combined_pixel_values
253
+
254
+ @staticmethod
255
+ def pixel_values_to_images(pixel_values: torch.Tensor):
256
+ pixel_values = rearrange(pixel_values, "f c h w -> f h w c")
257
+ pixel_values = (pixel_values / 2 + 0.5).clamp(0, 1)
258
+ images = (pixel_values * 255).to(torch.uint8)
259
+ images = images.cpu().numpy()
260
+ return images
261
+
262
+ def affine_transform_video(self, video_path):
263
+ video_frames = read_video(video_path, use_decord=False)
264
+ faces = []
265
+ boxes = []
266
+ affine_matrices = []
267
+ print(f"Affine transforming {len(video_frames)} faces...")
268
+ for frame in tqdm.tqdm(video_frames):
269
+ face, box, affine_matrix = self.image_processor.affine_transform(frame)
270
+ faces.append(face)
271
+ boxes.append(box)
272
+ affine_matrices.append(affine_matrix)
273
+
274
+ faces = torch.stack(faces)
275
+ return faces, video_frames, boxes, affine_matrices
276
+
277
+ def restore_video(self, faces, video_frames, boxes, affine_matrices):
278
+ video_frames = video_frames[: faces.shape[0]]
279
+ out_frames = []
280
+ for index, face in enumerate(faces):
281
+ x1, y1, x2, y2 = boxes[index]
282
+ height = int(y2 - y1)
283
+ width = int(x2 - x1)
284
+ face = torchvision.transforms.functional.resize(face, size=(height, width), antialias=True)
285
+ face = rearrange(face, "c h w -> h w c")
286
+ face = (face / 2 + 0.5).clamp(0, 1)
287
+ face = (face * 255).to(torch.uint8).cpu().numpy()
288
+ out_frame = self.image_processor.restorer.restore_img(video_frames[index], face, affine_matrices[index])
289
+ out_frames.append(out_frame)
290
+ return np.stack(out_frames, axis=0)
291
+
292
+ @torch.no_grad()
293
+ def __call__(
294
+ self,
295
+ video_path: str,
296
+ audio_path: str,
297
+ video_out_path: str,
298
+ video_mask_path: str = None,
299
+ num_frames: int = 16,
300
+ video_fps: int = 25,
301
+ audio_sample_rate: int = 16000,
302
+ height: Optional[int] = None,
303
+ width: Optional[int] = None,
304
+ num_inference_steps: int = 20,
305
+ guidance_scale: float = 1.5,
306
+ weight_dtype: Optional[torch.dtype] = torch.float16,
307
+ eta: float = 0.0,
308
+ mask: str = "fix_mask",
309
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
310
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
311
+ callback_steps: Optional[int] = 1,
312
+ **kwargs,
313
+ ):
314
+ is_train = self.unet.training
315
+ self.unet.eval()
316
+
317
+ # 0. Define call parameters
318
+ batch_size = 1
319
+ device = self._execution_device
320
+ self.image_processor = ImageProcessor(height, mask=mask, device="cuda")
321
+ self.set_progress_bar_config(desc=f"Sample frames: {num_frames}")
322
+
323
+ video_frames, original_video_frames, boxes, affine_matrices = self.affine_transform_video(video_path)
324
+ audio_samples = read_audio(audio_path)
325
+
326
+ # 1. Default height and width to unet
327
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
328
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
329
+
330
+ # 2. Check inputs
331
+ self.check_inputs(height, width, callback_steps)
332
+
333
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
334
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
335
+ # corresponds to doing no classifier free guidance.
336
+ do_classifier_free_guidance = guidance_scale > 1.0
337
+
338
+ # 3. set timesteps
339
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
340
+ timesteps = self.scheduler.timesteps
341
+
342
+ # 4. Prepare extra step kwargs.
343
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
344
+
345
+ self.video_fps = video_fps
346
+
347
+ if self.unet.add_audio_layer:
348
+ whisper_feature = self.audio_encoder.audio2feat(audio_path)
349
+ whisper_chunks = self.audio_encoder.feature2chunks(feature_array=whisper_feature, fps=video_fps)
350
+
351
+ num_inferences = min(len(video_frames), len(whisper_chunks)) // num_frames
352
+ else:
353
+ num_inferences = len(video_frames) // num_frames
354
+
355
+ synced_video_frames = []
356
+ masked_video_frames = []
357
+
358
+ num_channels_latents = self.vae.config.latent_channels
359
+
360
+ # Prepare latent variables
361
+ all_latents = self.prepare_latents(
362
+ batch_size,
363
+ num_frames * num_inferences,
364
+ num_channels_latents,
365
+ height,
366
+ width,
367
+ weight_dtype,
368
+ device,
369
+ generator,
370
+ )
371
+
372
+ for i in tqdm.tqdm(range(num_inferences), desc="Doing inference..."):
373
+ if self.unet.add_audio_layer:
374
+ audio_embeds = torch.stack(whisper_chunks[i * num_frames : (i + 1) * num_frames])
375
+ audio_embeds = audio_embeds.to(device, dtype=weight_dtype)
376
+ if do_classifier_free_guidance:
377
+ empty_audio_embeds = torch.zeros_like(audio_embeds)
378
+ audio_embeds = torch.cat([empty_audio_embeds, audio_embeds])
379
+ else:
380
+ audio_embeds = None
381
+ inference_video_frames = video_frames[i * num_frames : (i + 1) * num_frames]
382
+ latents = all_latents[:, :, i * num_frames : (i + 1) * num_frames]
383
+ pixel_values, masked_pixel_values, masks = self.image_processor.prepare_masks_and_masked_images(
384
+ inference_video_frames, affine_transform=False
385
+ )
386
+
387
+ # 7. Prepare mask latent variables
388
+ mask_latents, masked_image_latents = self.prepare_mask_latents(
389
+ masks,
390
+ masked_pixel_values,
391
+ height,
392
+ width,
393
+ weight_dtype,
394
+ device,
395
+ generator,
396
+ do_classifier_free_guidance,
397
+ )
398
+
399
+ # 8. Prepare image latents
400
+ image_latents = self.prepare_image_latents(
401
+ pixel_values,
402
+ device,
403
+ weight_dtype,
404
+ generator,
405
+ do_classifier_free_guidance,
406
+ )
407
+
408
+ # 9. Denoising loop
409
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
410
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
411
+ for j, t in enumerate(timesteps):
412
+ # expand the latents if we are doing classifier free guidance
413
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
414
+
415
+ # concat latents, mask, masked_image_latents in the channel dimension
416
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
417
+ latent_model_input = torch.cat(
418
+ [latent_model_input, mask_latents, masked_image_latents, image_latents], dim=1
419
+ )
420
+
421
+ # predict the noise residual
422
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=audio_embeds).sample
423
+
424
+ # perform guidance
425
+ if do_classifier_free_guidance:
426
+ noise_pred_uncond, noise_pred_audio = noise_pred.chunk(2)
427
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_audio - noise_pred_uncond)
428
+
429
+ # compute the previous noisy sample x_t -> x_t-1
430
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
431
+
432
+ # call the callback, if provided
433
+ if j == len(timesteps) - 1 or ((j + 1) > num_warmup_steps and (j + 1) % self.scheduler.order == 0):
434
+ progress_bar.update()
435
+ if callback is not None and j % callback_steps == 0:
436
+ callback(j, t, latents)
437
+
438
+ # Recover the pixel values
439
+ decoded_latents = self.decode_latents(latents)
440
+ decoded_latents = self.paste_surrounding_pixels_back(
441
+ decoded_latents, pixel_values, 1 - masks, device, weight_dtype
442
+ )
443
+ synced_video_frames.append(decoded_latents)
444
+ masked_video_frames.append(masked_pixel_values)
445
+
446
+ synced_video_frames = self.restore_video(
447
+ torch.cat(synced_video_frames), original_video_frames, boxes, affine_matrices
448
+ )
449
+ masked_video_frames = self.restore_video(
450
+ torch.cat(masked_video_frames), original_video_frames, boxes, affine_matrices
451
+ )
452
+
453
+ audio_samples_remain_length = int(synced_video_frames.shape[0] / video_fps * audio_sample_rate)
454
+ audio_samples = audio_samples[:audio_samples_remain_length].cpu().numpy()
455
+
456
+ if is_train:
457
+ self.unet.train()
458
+
459
+ temp_dir = "temp"
460
+ if os.path.exists(temp_dir):
461
+ shutil.rmtree(temp_dir)
462
+ os.makedirs(temp_dir, exist_ok=True)
463
+
464
+ write_video(os.path.join(temp_dir, "video.mp4"), synced_video_frames, fps=25)
465
+ # write_video(video_mask_path, masked_video_frames, fps=25)
466
+
467
+ sf.write(os.path.join(temp_dir, "audio.wav"), audio_samples, audio_sample_rate)
468
+
469
+ command = f"ffmpeg -y -loglevel error -nostdin -i {os.path.join(temp_dir, 'video.mp4')} -i {os.path.join(temp_dir, 'audio.wav')} -c:v libx264 -c:a aac -q:v 0 -q:a 0 {video_out_path}"
470
+ subprocess.run(command, shell=True)
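For reference, a minimal sketch of calling this pipeline, assuming `pipeline` is an already-loaded LipsyncPipeline (the actual model loading is done in scripts/inference.py, driven by inference.sh); the asset paths mirror the demo files used elsewhere in the repo:

import torch

# Hypothetical call mirroring the defaults of __call__ above; `pipeline` is assumed
# to already hold the VAE, Whisper-based audio encoder, UNet3DConditionModel and scheduler.
pipeline(
    video_path="assets/demo1_video.mp4",
    audio_path="assets/demo1_audio.wav",
    video_out_path="video_out.mp4",
    num_frames=16,
    num_inference_steps=20,
    guidance_scale=1.5,
    weight_dtype=torch.float16,
    mask="fix_mask",
)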
predict.py ADDED
@@ -0,0 +1,60 @@
1
+ # Prediction interface for Cog ⚙️
2
+ # https://cog.run/python
3
+
4
+ from cog import BasePredictor, Input, Path
5
+ import os
6
+ import time
7
+ import subprocess
8
+
9
+ MODEL_CACHE = "checkpoints"
10
+ MODEL_URL = "https://weights.replicate.delivery/default/chunyu-li/LatentSync/model.tar"
11
+
12
+ def download_weights(url, dest):
13
+ start = time.time()
14
+ print("downloading url: ", url)
15
+ print("downloading to: ", dest)
16
+ subprocess.check_call(["pget", "-xf", url, dest], close_fds=False)
17
+ print("downloading took: ", time.time() - start)
18
+
19
+ class Predictor(BasePredictor):
20
+ def setup(self) -> None:
21
+ """Load the model into memory to make running multiple predictions efficient"""
22
+ # Download the model weights
23
+ if not os.path.exists(MODEL_CACHE):
24
+ download_weights(MODEL_URL, MODEL_CACHE)
25
+
26
+ # Soft links for the auxiliary models
27
+ os.system("mkdir -p ~/.cache/torch/hub/checkpoints")
28
+ os.system("ln -s $(pwd)/checkpoints/auxiliary/2DFAN4-cd938726ad.zip ~/.cache/torch/hub/checkpoints/2DFAN4-cd938726ad.zip")
29
+ os.system("ln -s $(pwd)/checkpoints/auxiliary/s3fd-619a316812.pth ~/.cache/torch/hub/checkpoints/s3fd-619a316812.pth")
30
+ os.system("ln -s $(pwd)/checkpoints/auxiliary/vgg16-397923af.pth ~/.cache/torch/hub/checkpoints/vgg16-397923af.pth")
31
+
32
+ def predict(
33
+ self,
34
+ video: Path = Input(
35
+ description="Input video", default=None
36
+ ),
37
+ audio: Path = Input(
38
+ description="Input audio to ", default=None
39
+ ),
40
+ guidance_scale: float = Input(
41
+ description="Guidance scale", ge=0, le=10, default=1.0
42
+ ),
43
+ seed: int = Input(
44
+ description="Set to 0 for Random seed", default=0
45
+ )
46
+ ) -> Path:
47
+ """Run a single prediction on the model"""
48
+ if seed <= 0:
49
+ seed = int.from_bytes(os.urandom(2), "big")
50
+ print(f"Using seed: {seed}")
51
+
52
+ video_path = str(video)
53
+ audio_path = str(audio)
54
+ config_path = "configs/unet/second_stage.yaml"
55
+ ckpt_path = "checkpoints/latentsync_unet.pt"
56
+ output_path = "/tmp/video_out.mp4"
57
+
58
+ # Run the following command:
59
+ os.system(f"python -m scripts.inference --unet_config_path {config_path} --inference_ckpt_path {ckpt_path} --guidance_scale {str(guidance_scale)} --video_path {video_path} --audio_path {audio_path} --video_out_path {output_path} --seed {seed}")
60
+ return Path(output_path)
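Cog normally drives this Predictor itself; as a quick local smoke test it could also be exercised directly (the demo paths below are illustrative):

from cog import Path
from predict import Predictor

predictor = Predictor()
predictor.setup()  # downloads the weights into checkpoints/ on first run
out = predictor.predict(
    video=Path("assets/demo1_video.mp4"),
    audio=Path("assets/demo1_audio.wav"),
    guidance_scale=1.0,
    seed=0,  # 0 means a random seed is drawn
)
print(out)  # /tmp/video_out.mp4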
preprocess/affine_transform.py ADDED
@@ -0,0 +1,137 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from latentsync.utils.util import read_video, write_video
16
+ from latentsync.utils.image_processor import ImageProcessor
17
+ import torch
18
+ from einops import rearrange
19
+ import os
20
+ import tqdm
21
+ import subprocess
22
+ from multiprocessing import Process
23
+ import shutil
24
+
25
+ paths = []
26
+
27
+
28
+ def gather_video_paths(input_dir, output_dir):
29
+ for video in sorted(os.listdir(input_dir)):
30
+ if video.endswith(".mp4"):
31
+ video_input = os.path.join(input_dir, video)
32
+ video_output = os.path.join(output_dir, video)
33
+ if os.path.isfile(video_output):
34
+ continue
35
+ paths.append((video_input, video_output))
36
+ elif os.path.isdir(os.path.join(input_dir, video)):
37
+ gather_video_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))
38
+
39
+
40
+ class FaceDetector:
41
+ def __init__(self, resolution: int = 512, device: str = "cpu"):
42
+ self.image_processor = ImageProcessor(resolution, "fix_mask", device)
43
+
44
+ def affine_transform_video(self, video_path):
45
+ video_frames = read_video(video_path, change_fps=False)
46
+ results = []
47
+ for frame in video_frames:
48
+ frame, _, _ = self.image_processor.affine_transform(frame)
49
+ results.append(frame)
50
+ results = torch.stack(results)
51
+
52
+ results = rearrange(results, "f c h w -> f h w c").numpy()
53
+ return results
54
+
55
+ def close(self):
56
+ self.image_processor.close()
57
+
58
+
59
+ def combine_video_audio(video_frames, video_input_path, video_output_path, process_temp_dir):
60
+ video_name = os.path.basename(video_input_path)[:-4]
61
+ audio_temp = os.path.join(process_temp_dir, f"{video_name}_temp.wav")
62
+ video_temp = os.path.join(process_temp_dir, f"{video_name}_temp.mp4")
63
+
64
+ write_video(video_temp, video_frames, fps=25)
65
+
66
+ command = f"ffmpeg -y -loglevel error -i {video_input_path} -q:a 0 -map a {audio_temp}"
67
+ subprocess.run(command, shell=True)
68
+
69
+ os.makedirs(os.path.dirname(video_output_path), exist_ok=True)
70
+ command = f"ffmpeg -y -loglevel error -i {video_temp} -i {audio_temp} -c:v libx264 -c:a aac -map 0:v -map 1:a -q:v 0 -q:a 0 {video_output_path}"
71
+ subprocess.run(command, shell=True)
72
+
73
+ os.remove(audio_temp)
74
+ os.remove(video_temp)
75
+
76
+
77
+ def func(paths, process_temp_dir, device_id, resolution):
78
+ os.makedirs(process_temp_dir, exist_ok=True)
79
+ face_detector = FaceDetector(resolution, f"cuda:{device_id}")
80
+
81
+ for video_input, video_output in paths:
82
+ if os.path.isfile(video_output):
83
+ continue
84
+ try:
85
+ video_frames = face_detector.affine_transform_video(video_input)
86
+ except Exception as e: # Handle the case where no face is detected
87
+ print(f"Exception: {e} - {video_input}")
88
+ continue
89
+
90
+ os.makedirs(os.path.dirname(video_output), exist_ok=True)
91
+ combine_video_audio(video_frames, video_input, video_output, process_temp_dir)
92
+ print(f"Saved: {video_output}")
93
+
94
+ face_detector.close()
95
+
96
+
97
+ def split(a, n):
98
+ k, m = divmod(len(a), n)
99
+ return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))
100
+
101
+
102
+ def affine_transform_multi_gpus(input_dir, output_dir, temp_dir, resolution, num_workers):
103
+ print(f"Recursively gathering video paths of {input_dir} ...")
104
+ gather_video_paths(input_dir, output_dir)
105
+ num_devices = torch.cuda.device_count()
106
+ if num_devices == 0:
107
+ raise RuntimeError("No GPUs found")
108
+
109
+ if os.path.exists(temp_dir):
110
+ shutil.rmtree(temp_dir)
111
+ os.makedirs(temp_dir, exist_ok=True)
112
+
113
+ split_paths = list(split(paths, num_workers * num_devices))
114
+
115
+ processes = []
116
+
117
+ for i in range(num_devices):
118
+ for j in range(num_workers):
119
+ process_index = i * num_workers + j
120
+ process = Process(
121
+ target=func, args=(split_paths[process_index], os.path.join(temp_dir, f"process_{i}"), i, resolution)
122
+ )
123
+ process.start()
124
+ processes.append(process)
125
+
126
+ for process in processes:
127
+ process.join()
128
+
129
+
130
+ if __name__ == "__main__":
131
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars/resampled/train"
132
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars/affine_transformed/train"
133
+ temp_dir = "temp"
134
+ resolution = 256
135
+ num_workers = 10 # How many processes per device
136
+
137
+ affine_transform_multi_gpus(input_dir, output_dir, temp_dir, resolution, num_workers)
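The split helper above deals the gathered paths out to num_devices * num_workers processes in near-equal slices; a small illustration with dummy data:

dummy_paths = list(range(10))          # stand-ins for 10 (video_input, video_output) pairs
chunks = list(split(dummy_paths, 3))
print([len(c) for c in chunks])        # [4, 3, 3] -- chunk sizes differ by at most one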
preprocess/data_processing_pipeline.py ADDED
@@ -0,0 +1,85 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ import os
17
+ from preprocess.affine_transform import affine_transform_multi_gpus
18
+ from preprocess.remove_broken_videos import remove_broken_videos_multiprocessing
19
+ from preprocess.detect_shot import detect_shot_multiprocessing
20
+ from preprocess.filter_high_resolution import filter_high_resolution_multiprocessing
21
+ from preprocess.resample_fps_hz import resample_fps_hz_multiprocessing
22
+ from preprocess.segment_videos import segment_videos_multiprocessing
23
+ from preprocess.sync_av import sync_av_multi_gpus
24
+ from preprocess.filter_visual_quality import filter_visual_quality_multi_gpus
25
+ from preprocess.remove_incorrect_affined import remove_incorrect_affined_multiprocessing
26
+
27
+
28
+ def data_processing_pipeline(
29
+ total_num_workers, per_gpu_num_workers, resolution, sync_conf_threshold, temp_dir, input_dir
30
+ ):
31
+ print("Removing broken videos...")
32
+ remove_broken_videos_multiprocessing(input_dir, total_num_workers)
33
+
34
+ print("Resampling FPS hz...")
35
+ resampled_dir = os.path.join(os.path.dirname(input_dir), "resampled")
36
+ resample_fps_hz_multiprocessing(input_dir, resampled_dir, total_num_workers)
37
+
38
+ print("Detecting shot...")
39
+ shot_dir = os.path.join(os.path.dirname(input_dir), "shot")
40
+ detect_shot_multiprocessing(resampled_dir, shot_dir, total_num_workers)
41
+
42
+ print("Segmenting videos...")
43
+ segmented_dir = os.path.join(os.path.dirname(input_dir), "segmented")
44
+ segment_videos_multiprocessing(shot_dir, segmented_dir, total_num_workers)
45
+
46
+ print("Filtering high resolution...")
47
+ high_resolution_dir = os.path.join(os.path.dirname(input_dir), "high_resolution")
48
+ filter_high_resolution_multiprocessing(segmented_dir, high_resolution_dir, resolution, total_num_workers)
49
+
50
+ print("Affine transforming videos...")
51
+ affine_transformed_dir = os.path.join(os.path.dirname(input_dir), "affine_transformed")
52
+ affine_transform_multi_gpus(
53
+ high_resolution_dir, affine_transformed_dir, temp_dir, resolution, per_gpu_num_workers // 2
54
+ )
55
+
56
+ print("Removing incorrect affined videos...")
57
+ remove_incorrect_affined_multiprocessing(affine_transformed_dir, total_num_workers)
58
+
59
+ print("Syncing audio and video...")
60
+ av_synced_dir = os.path.join(os.path.dirname(input_dir), f"av_synced_{sync_conf_threshold}")
61
+ sync_av_multi_gpus(affine_transformed_dir, av_synced_dir, temp_dir, per_gpu_num_workers, sync_conf_threshold)
62
+
63
+ print("Filtering visual quality...")
64
+ high_visual_quality_dir = os.path.join(os.path.dirname(input_dir), "high_visual_quality")
65
+ filter_visual_quality_multi_gpus(av_synced_dir, high_visual_quality_dir, per_gpu_num_workers)
66
+
67
+
68
+ if __name__ == "__main__":
69
+ parser = argparse.ArgumentParser()
70
+ parser.add_argument("--total_num_workers", type=int, default=100)
71
+ parser.add_argument("--per_gpu_num_workers", type=int, default=20)
72
+ parser.add_argument("--resolution", type=int, default=256)
73
+ parser.add_argument("--sync_conf_threshold", type=int, default=3)
74
+ parser.add_argument("--temp_dir", type=str, default="temp")
75
+ parser.add_argument("--input_dir", type=str, required=True)
76
+ args = parser.parse_args()
77
+
78
+ data_processing_pipeline(
79
+ args.total_num_workers,
80
+ args.per_gpu_num_workers,
81
+ args.resolution,
82
+ args.sync_conf_threshold,
83
+ args.temp_dir,
84
+ args.input_dir,
85
+ )
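Beyond the CLI entry point above, the pipeline function can also be invoked directly; the worker counts below match the argparse defaults and the input path is illustrative:

from preprocess.data_processing_pipeline import data_processing_pipeline

# Each stage writes its output as a sibling directory of input_dir
# (resampled/, shot/, segmented/, ..., high_visual_quality/).
data_processing_pipeline(
    total_num_workers=100,
    per_gpu_num_workers=20,
    resolution=256,
    sync_conf_threshold=3,
    temp_dir="temp",
    input_dir="/data/raw_videos",
)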
preprocess/detect_shot.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import subprocess
17
+ import tqdm
18
+ from multiprocessing import Pool
19
+
20
+ paths = []
21
+
22
+
23
+ def gather_paths(input_dir, output_dir):
24
+ for video in sorted(os.listdir(input_dir)):
25
+ if video.endswith(".mp4"):
26
+ video_input = os.path.join(input_dir, video)
27
+ video_output = os.path.join(output_dir, video)
28
+ if os.path.isfile(video_output):
29
+ continue
30
+ paths.append([video_input, output_dir])
31
+ elif os.path.isdir(os.path.join(input_dir, video)):
32
+ gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))
33
+
34
+
35
+ def detect_shot(video_input, output_dir):
36
+ os.makedirs(output_dir, exist_ok=True)
37
+ video = os.path.basename(video_input)[:-4]
38
+ command = f"scenedetect --quiet -i {video_input} detect-adaptive --threshold 2 split-video --filename '{video}_shot_$SCENE_NUMBER' --output {output_dir}"
39
+ # command = f"scenedetect --quiet -i {video_input} detect-adaptive --threshold 2 split-video --high-quality --filename '{video}_shot_$SCENE_NUMBER' --output {output_dir}"
40
+ subprocess.run(command, shell=True)
41
+
42
+
43
+ def multi_run_wrapper(args):
44
+ return detect_shot(*args)
45
+
46
+
47
+ def detect_shot_multiprocessing(input_dir, output_dir, num_workers):
48
+ print(f"Recursively gathering video paths of {input_dir} ...")
49
+ gather_paths(input_dir, output_dir)
50
+
51
+ print(f"Detecting shot of {input_dir} ...")
52
+ with Pool(num_workers) as pool:
53
+ for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
54
+ pass
55
+
56
+
57
+ if __name__ == "__main__":
58
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/ads/high-resolution"
59
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/ads/shot"
60
+ num_workers = 50
61
+
62
+ detect_shot_multiprocessing(input_dir, output_dir, num_workers)
preprocess/filter_high_resolution.py ADDED
@@ -0,0 +1,112 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import mediapipe as mp
16
+ from latentsync.utils.util import read_video
17
+ import os
18
+ import tqdm
19
+ import shutil
20
+ from multiprocessing import Pool
21
+
22
+ paths = []
23
+
24
+
25
+ def gather_video_paths(input_dir, output_dir, resolution):
26
+ for video in sorted(os.listdir(input_dir)):
27
+ if video.endswith(".mp4"):
28
+ video_input = os.path.join(input_dir, video)
29
+ video_output = os.path.join(output_dir, video)
30
+ if os.path.isfile(video_output):
31
+ continue
32
+ paths.append([video_input, video_output, resolution])
33
+ elif os.path.isdir(os.path.join(input_dir, video)):
34
+ gather_video_paths(os.path.join(input_dir, video), os.path.join(output_dir, video), resolution)
35
+
36
+
37
+ class FaceDetector:
38
+ def __init__(self, resolution=256):
39
+ self.face_detection = mp.solutions.face_detection.FaceDetection(
40
+ model_selection=0, min_detection_confidence=0.5
41
+ )
42
+ self.resolution = resolution
43
+
44
+ def detect_face(self, image):
45
+ height, width = image.shape[:2]
46
+ # Process the image and detect faces.
47
+ results = self.face_detection.process(image)
48
+
49
+ if not results.detections: # Face not detected
50
+ raise Exception("Face not detected")
51
+
52
+ if len(results.detections) != 1:
53
+ return False
54
+ detection = results.detections[0] # Only use the first face in the image
55
+
56
+ bounding_box = detection.location_data.relative_bounding_box
57
+ face_width = int(bounding_box.width * width)
58
+ face_height = int(bounding_box.height * height)
59
+ if face_width < self.resolution or face_height < self.resolution:
60
+ return False
61
+ return True
62
+
63
+ def detect_video(self, video_path):
64
+ video_frames = read_video(video_path, change_fps=False)
65
+ if len(video_frames) == 0:
66
+ return False
67
+ for frame in video_frames:
68
+ if not self.detect_face(frame):
69
+ return False
70
+ return True
71
+
72
+ def close(self):
73
+ self.face_detection.close()
74
+
75
+
76
+ def filter_video(video_input, video_out, resolution):
77
+ if os.path.isfile(video_out):
78
+ return
79
+ face_detector = FaceDetector(resolution)
80
+ try:
81
+ save = face_detector.detect_video(video_input)
82
+ except Exception as e:
83
+ # print(f"Exception: {e} Input video: {video_input}")
84
+ face_detector.close()
85
+ return
86
+ if save:
87
+ os.makedirs(os.path.dirname(video_out), exist_ok=True)
88
+ shutil.copy(video_input, video_out)
89
+ face_detector.close()
90
+
91
+
92
+ def multi_run_wrapper(args):
93
+ return filter_video(*args)
94
+
95
+
96
+ def filter_high_resolution_multiprocessing(input_dir, output_dir, resolution, num_workers):
97
+ print(f"Recursively gathering video paths of {input_dir} ...")
98
+ gather_video_paths(input_dir, output_dir, resolution)
99
+
100
+ print(f"Filtering high resolution videos in {input_dir} ...")
101
+ with Pool(num_workers) as pool:
102
+ for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
103
+ pass
104
+
105
+
106
+ if __name__ == "__main__":
107
+ input_dir = "/mnt/bn/maliva-gen-ai/lichunyu/HDTF/original/train"
108
+ output_dir = "/mnt/bn/maliva-gen-ai/lichunyu/HDTF/detected/train"
109
+ resolution = 256
110
+ num_workers = 50
111
+
112
+ filter_high_resolution_multiprocessing(input_dir, output_dir, resolution, num_workers)
preprocess/filter_visual_quality.py ADDED
@@ -0,0 +1,127 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import tqdm
17
+ import torch
18
+ import torchvision
19
+ import shutil
20
+ from multiprocessing import Process
21
+ import numpy as np
22
+ from decord import VideoReader
23
+ from einops import rearrange
24
+ from eval.hyper_iqa import HyperNet, TargetNet
25
+
26
+
27
+ paths = []
28
+
29
+
30
+ def gather_paths(input_dir, output_dir):
31
+ # os.makedirs(output_dir, exist_ok=True)
32
+
33
+ for video in tqdm.tqdm(sorted(os.listdir(input_dir))):
34
+ if video.endswith(".mp4"):
35
+ video_input = os.path.join(input_dir, video)
36
+ video_output = os.path.join(output_dir, video)
37
+ if os.path.isfile(video_output):
38
+ continue
39
+ paths.append((video_input, video_output))
40
+ elif os.path.isdir(os.path.join(input_dir, video)):
41
+ gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))
42
+
43
+
44
+ def read_video(video_path: str):
45
+ vr = VideoReader(video_path)
46
+ first_frame = vr[0].asnumpy()
47
+ middle_frame = vr[len(vr) // 2].asnumpy()
48
+ last_frame = vr[-1].asnumpy()
49
+ vr.seek(0)
50
+ video_frames = np.stack([first_frame, middle_frame, last_frame], axis=0)
51
+ video_frames = torch.from_numpy(rearrange(video_frames, "b h w c -> b c h w"))
52
+ video_frames = video_frames / 255.0
53
+ return video_frames
54
+
55
+
56
+ def func(paths, device_id):
57
+ device = f"cuda:{device_id}"
58
+
59
+ model_hyper = HyperNet(16, 112, 224, 112, 56, 28, 14, 7).to(device)
60
+ model_hyper.train(False)
61
+
62
+ # load the pre-trained model on the koniq-10k dataset
63
+ model_hyper.load_state_dict((torch.load("checkpoints/auxiliary/koniq_pretrained.pkl", map_location=device)))
64
+
65
+ transforms = torchvision.transforms.Compose(
66
+ [
67
+ torchvision.transforms.CenterCrop(size=224),
68
+ torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
69
+ ]
70
+ )
71
+
72
+ for video_input, video_output in paths:
73
+ try:
74
+ video_frames = read_video(video_input)
75
+ video_frames = transforms(video_frames)
76
+ video_frames = video_frames.clone().detach().to(device)
77
+ paras = model_hyper(video_frames) # 'paras' contains the network weights conveyed to target network
78
+
79
+ # Building target network
80
+ model_target = TargetNet(paras).to(device)
81
+ for param in model_target.parameters():
82
+ param.requires_grad = False
83
+
84
+ # Quality prediction
85
+ pred = model_target(paras["target_in_vec"]) # 'paras['target_in_vec']' is the input to target net
86
+
87
+ # quality score ranges from 0-100, a higher score indicates a better quality
88
+ quality_score = pred.mean().item()
89
+ print(f"Input video: {video_input}\nVisual quality score: {quality_score:.2f}")
90
+
91
+ if quality_score >= 40:
92
+ os.makedirs(os.path.dirname(video_output), exist_ok=True)
93
+ shutil.copy(video_input, video_output)
94
+ except Exception as e:
95
+ print(e)
96
+
97
+
98
+ def split(a, n):
99
+ k, m = divmod(len(a), n)
100
+ return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))
101
+
102
+
103
+ def filter_visual_quality_multi_gpus(input_dir, output_dir, num_workers):
104
+ gather_paths(input_dir, output_dir)
105
+ num_devices = torch.cuda.device_count()
106
+ if num_devices == 0:
107
+ raise RuntimeError("No GPUs found")
108
+ split_paths = list(split(paths, num_workers * num_devices))
109
+ processes = []
110
+
111
+ for i in range(num_devices):
112
+ for j in range(num_workers):
113
+ process_index = i * num_workers + j
114
+ process = Process(target=func, args=(split_paths[process_index], i))
115
+ process.start()
116
+ processes.append(process)
117
+
118
+ for process in processes:
119
+ process.join()
120
+
121
+
122
+ if __name__ == "__main__":
123
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/av_synced_high"
124
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/high_visual_quality"
125
+ num_workers = 20 # How many processes per device
126
+
127
+ filter_visual_quality_multi_gpus(input_dir, output_dir, num_workers)
preprocess/remove_broken_videos.py ADDED
@@ -0,0 +1,43 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ from multiprocessing import Pool
17
+ import tqdm
18
+
19
+ from latentsync.utils.av_reader import AVReader
20
+ from latentsync.utils.util import gather_video_paths_recursively
21
+
22
+
23
+ def remove_broken_video(video_path):
24
+ try:
25
+ AVReader(video_path)
26
+ except Exception:
27
+ os.remove(video_path)
28
+
29
+
30
+ def remove_broken_videos_multiprocessing(input_dir, num_workers):
31
+ video_paths = gather_video_paths_recursively(input_dir)
32
+
33
+ print("Removing broken videos...")
34
+ with Pool(num_workers) as pool:
35
+ for _ in tqdm.tqdm(pool.imap_unordered(remove_broken_video, video_paths), total=len(video_paths)):
36
+ pass
37
+
38
+
39
+ if __name__ == "__main__":
40
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/multilingual/affine_transformed"
41
+ num_workers = 50
42
+
43
+ remove_broken_videos_multiprocessing(input_dir, num_workers)
preprocess/remove_incorrect_affined.py ADDED
@@ -0,0 +1,81 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import mediapipe as mp
16
+ from latentsync.utils.util import read_video, gather_video_paths_recursively
17
+ import os
18
+ import tqdm
19
+ from multiprocessing import Pool
20
+
21
+
22
+ class FaceDetector:
23
+ def __init__(self):
24
+ self.face_detection = mp.solutions.face_detection.FaceDetection(
25
+ model_selection=0, min_detection_confidence=0.5
26
+ )
27
+
28
+ def detect_face(self, image):
29
+ # Process the image and detect faces.
30
+ results = self.face_detection.process(image)
31
+
32
+ if not results.detections: # Face not detected
33
+ return False
34
+
35
+ if len(results.detections) != 1:
36
+ return False
37
+ return True
38
+
39
+ def detect_video(self, video_path):
40
+ try:
41
+ video_frames = read_video(video_path, change_fps=False)
42
+ except Exception as e:
43
+ print(f"Exception: {e} - {video_path}")
44
+ return False
45
+ if len(video_frames) == 0:
46
+ return False
47
+ for frame in video_frames:
48
+ if not self.detect_face(frame):
49
+ return False
50
+ return True
51
+
52
+ def close(self):
53
+ self.face_detection.close()
54
+
55
+
56
+ def remove_incorrect_affined(video_path):
57
+ if not os.path.isfile(video_path):
58
+ return
59
+ face_detector = FaceDetector()
60
+ has_face = face_detector.detect_video(video_path)
61
+ if not has_face:
62
+ os.remove(video_path)
63
+ print(f"Removed: {video_path}")
64
+ face_detector.close()
65
+
66
+
67
+ def remove_incorrect_affined_multiprocessing(input_dir, num_workers):
68
+ video_paths = gather_video_paths_recursively(input_dir)
69
+ print(f"Total videos: {len(video_paths)}")
70
+
71
+ print(f"Removing incorrect affined videos in {input_dir} ...")
72
+ with Pool(num_workers) as pool:
73
+ for _ in tqdm.tqdm(pool.imap_unordered(remove_incorrect_affined, video_paths), total=len(video_paths)):
74
+ pass
75
+
76
+
77
+ if __name__ == "__main__":
78
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/multilingual_dcc/high_visual_quality"
79
+ num_workers = 50
80
+
81
+ remove_incorrect_affined_multiprocessing(input_dir, num_workers)
preprocess/resample_fps_hz.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import subprocess
17
+ import tqdm
18
+ from multiprocessing import Pool
19
+ import cv2
20
+
21
+ paths = []
22
+
23
+
24
+ def gather_paths(input_dir, output_dir):
25
+ for video in sorted(os.listdir(input_dir)):
26
+ if video.endswith(".mp4"):
27
+ video_input = os.path.join(input_dir, video)
28
+ video_output = os.path.join(output_dir, video)
29
+ if os.path.isfile(video_output):
30
+ continue
31
+ paths.append([video_input, video_output])
32
+ elif os.path.isdir(os.path.join(input_dir, video)):
33
+ gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))
34
+
35
+
36
+ def get_video_fps(video_path: str):
37
+ cam = cv2.VideoCapture(video_path)
38
+ fps = cam.get(cv2.CAP_PROP_FPS)
39
+ return fps
40
+
41
+
42
+ def resample_fps_hz(video_input, video_output):
43
+ os.makedirs(os.path.dirname(video_output), exist_ok=True)
44
+ if get_video_fps(video_input) == 25:
45
+ command = f"ffmpeg -loglevel error -y -i {video_input} -c:v copy -ar 16000 -q:a 0 {video_output}"
46
+ else:
47
+ command = f"ffmpeg -loglevel error -y -i {video_input} -r 25 -ar 16000 -q:a 0 {video_output}"
48
+ subprocess.run(command, shell=True)
49
+
50
+
51
+ def multi_run_wrapper(args):
52
+ return resample_fps_hz(*args)
53
+
54
+
55
+ def resample_fps_hz_multiprocessing(input_dir, output_dir, num_workers):
56
+ print(f"Recursively gathering video paths of {input_dir} ...")
57
+ gather_paths(input_dir, output_dir)
58
+
59
+ print(f"Resampling FPS and Hz of {input_dir} ...")
60
+ with Pool(num_workers) as pool:
61
+ for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
62
+ pass
63
+
64
+
65
+ if __name__ == "__main__":
66
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/HDTF/segmented/train"
67
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/HDTF/resampled_test"
68
+ num_workers = 20
69
+
70
+ resample_fps_hz_multiprocessing(input_dir, output_dir, num_workers)
preprocess/segment_videos.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import subprocess
17
+ import tqdm
18
+ from multiprocessing import Pool
19
+
20
+ paths = []
21
+
22
+
23
+ def gather_paths(input_dir, output_dir):
24
+ for video in sorted(os.listdir(input_dir)):
25
+ if video.endswith(".mp4"):
26
+ video_basename = video[:-4]
27
+ video_input = os.path.join(input_dir, video)
28
+ video_output = os.path.join(output_dir, f"{video_basename}_%03d.mp4")
29
+ if os.path.isfile(video_output):
30
+ continue
31
+ paths.append([video_input, video_output])
32
+ elif os.path.isdir(os.path.join(input_dir, video)):
33
+ gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))
34
+
35
+
36
+ def segment_video(video_input, video_output):
37
+ os.makedirs(os.path.dirname(video_output), exist_ok=True)
38
+ command = f"ffmpeg -loglevel error -y -i {video_input} -map 0 -c:v copy -segment_time 5 -f segment -reset_timestamps 1 -q:a 0 {video_output}"
39
+ # command = f'ffmpeg -loglevel error -y -i {video_input} -map 0 -segment_time 5 -f segment -reset_timestamps 1 -force_key_frames "expr:gte(t,n_forced*5)" -crf 18 -q:a 0 {video_output}'
40
+ subprocess.run(command, shell=True)
41
+
42
+
43
+ def multi_run_wrapper(args):
44
+ return segment_video(*args)
45
+
46
+
47
+ def segment_videos_multiprocessing(input_dir, output_dir, num_workers):
48
+ print(f"Recursively gathering video paths of {input_dir} ...")
49
+ gather_paths(input_dir, output_dir)
50
+
51
+ print(f"Segmenting videos of {input_dir} ...")
52
+ with Pool(num_workers) as pool:
53
+ for _ in tqdm.tqdm(pool.imap_unordered(multi_run_wrapper, paths), total=len(paths)):
54
+ pass
55
+
56
+
57
+ if __name__ == "__main__":
58
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars_new/cut"
59
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/avatars_new/segmented"
60
+ num_workers = 50
61
+
62
+ segment_videos_multiprocessing(input_dir, output_dir, num_workers)
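
Note on segment_video above: because the video stream is copied (-c:v copy), ffmpeg can only cut at existing keyframes, so the 5-second segments are approximate; the commented-out variant re-encodes with forced keyframes every 5 seconds to get exact boundaries at the cost of re-encoding. The %03d in video_output is expanded by ffmpeg into a numbered sequence, e.g. (hypothetical basename):

    video_basename = "speaker01"  # hypothetical
    print([f"{video_basename}_{i:03d}.mp4" for i in range(3)])
    # ['speaker01_000.mp4', 'speaker01_001.mp4', 'speaker01_002.mp4']
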
preprocess/sync_av.py ADDED
@@ -0,0 +1,113 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import tqdm
17
+ from eval.syncnet import SyncNetEval
18
+ from eval.syncnet_detect import SyncNetDetector
19
+ from eval.eval_sync_conf import syncnet_eval
20
+ import torch
21
+ import subprocess
22
+ import shutil
23
+ from multiprocessing import Process
24
+
25
+ paths = []
26
+
27
+
28
+ def gather_paths(input_dir, output_dir):
29
+ # os.makedirs(output_dir, exist_ok=True)
30
+
31
+ for video in tqdm.tqdm(sorted(os.listdir(input_dir))):
32
+ if video.endswith(".mp4"):
33
+ video_input = os.path.join(input_dir, video)
34
+ video_output = os.path.join(output_dir, video)
35
+ if os.path.isfile(video_output):
36
+ continue
37
+ paths.append((video_input, video_output))
38
+ elif os.path.isdir(os.path.join(input_dir, video)):
39
+ gather_paths(os.path.join(input_dir, video), os.path.join(output_dir, video))
40
+
41
+
42
+ def adjust_offset(video_input: str, video_output: str, av_offset: int, fps: int = 25):
43
+ command = f"ffmpeg -loglevel error -y -i {video_input} -itsoffset {av_offset/fps} -i {video_input} -map 0:v -map 1:a -c copy -q:v 0 -q:a 0 {video_output}"
44
+ subprocess.run(command, shell=True)
45
+
46
+
47
+ def func(sync_conf_threshold, paths, device_id, process_temp_dir):
48
+ os.makedirs(process_temp_dir, exist_ok=True)
49
+ device = f"cuda:{device_id}"
50
+
51
+ syncnet = SyncNetEval(device=device)
52
+ syncnet.loadParameters("checkpoints/auxiliary/syncnet_v2.model")
53
+
54
+ detect_results_dir = os.path.join(process_temp_dir, "detect_results")
55
+ syncnet_eval_results_dir = os.path.join(process_temp_dir, "syncnet_eval_results")
56
+
57
+ syncnet_detector = SyncNetDetector(device=device, detect_results_dir=detect_results_dir)
58
+
59
+ for video_input, video_output in paths:
60
+ try:
61
+ av_offset, conf = syncnet_eval(
62
+ syncnet, syncnet_detector, video_input, syncnet_eval_results_dir, detect_results_dir
63
+ )
64
+ if conf >= sync_conf_threshold and abs(av_offset) <= 6:
65
+ os.makedirs(os.path.dirname(video_output), exist_ok=True)
66
+ if av_offset == 0:
67
+ shutil.copy(video_input, video_output)
68
+ else:
69
+ adjust_offset(video_input, video_output, av_offset)
70
+ except Exception as e:
71
+ print(e)
72
+
73
+
74
+ def split(a, n):
75
+ k, m = divmod(len(a), n)
76
+ return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))
77
+
78
+
79
+ def sync_av_multi_gpus(input_dir, output_dir, temp_dir, num_workers, sync_conf_threshold):
80
+ gather_paths(input_dir, output_dir)
81
+ num_devices = torch.cuda.device_count()
82
+ if num_devices == 0:
83
+ raise RuntimeError("No GPUs found")
84
+ split_paths = list(split(paths, num_workers * num_devices))
85
+ processes = []
86
+
87
+ for i in range(num_devices):
88
+ for j in range(num_workers):
89
+ process_index = i * num_workers + j
90
+ process = Process(
91
+ target=func,
92
+ args=(
93
+ sync_conf_threshold,
94
+ split_paths[process_index],
95
+ i,
96
+ os.path.join(temp_dir, f"process_{process_index}"),
97
+ ),
98
+ )
99
+ process.start()
100
+ processes.append(process)
101
+
102
+ for process in processes:
103
+ process.join()
104
+
105
+
106
+ if __name__ == "__main__":
107
+ input_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/ads/affine_transformed"
108
+ output_dir = "/mnt/bn/maliva-gen-ai-v2/chunyu.li/VoxCeleb2/temp"
109
+ temp_dir = "temp"
110
+ num_workers = 20 # How many processes per device
111
+ sync_conf_threshold = 3
112
+
113
+ sync_av_multi_gpus(input_dir, output_dir, temp_dir, num_workers, sync_conf_threshold)
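
Note on adjust_offset above: the AV offset reported by SyncNet is measured in frames at 25 fps, so av_offset / fps converts it to seconds, and -itsoffset applied to the second input shifts only its audio stream (-map 1:a) while the video (-map 0:v) is copied unchanged from the first input. A quick illustration of the command it builds, assuming a hypothetical 3-frame offset and placeholder paths:

    av_offset, fps = 3, 25        # hypothetical offset in frames
    shift = av_offset / fps       # 0.12 seconds
    command = (
        f"ffmpeg -loglevel error -y -i in.mp4 -itsoffset {shift} -i in.mp4 "
        f"-map 0:v -map 1:a -c copy -q:v 0 -q:a 0 out.mp4"
    )
    print(command)
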
requirements.txt ADDED
@@ -0,0 +1,30 @@
1
+ torch==2.2.2
2
+ torchvision==0.17.2
3
+ --extra-index-url https://download.pytorch.org/whl/cu121
4
+ xformers==0.0.26
5
+ triton==2.2.0
6
+
7
+ diffusers==0.11.1
8
+ transformers==4.38.0
9
+ huggingface-hub==0.25.2
10
+ imageio==2.27.0
11
+ decord==0.6.0
12
+ accelerate==0.26.1
13
+ einops==0.7.0
14
+ omegaconf==2.3.0
15
+ safetensors==0.4.2
16
+ opencv-python==4.9.0.80
17
+ mediapipe==0.10.11
18
+ av==11.0.0
19
+ torch-fidelity==0.3.0
20
+ torchmetrics==1.3.1
21
+ python_speech_features==0.6
22
+ librosa==0.10.1
23
+ scenedetect==0.6.1
24
+ ffmpeg-python==0.2.0
25
+ lpips==0.1.4
26
+ face-alignment==1.4.1
27
+ ninja==1.11.1.1
28
+ pandas==2.0.3
29
+ numpy==1.24.4
30
+ gradio==5.9.1
scripts/inference.py ADDED
@@ -0,0 +1,103 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import argparse
16
+ from omegaconf import OmegaConf
17
+ import torch
18
+ from diffusers import AutoencoderKL, DDIMScheduler
19
+ from latentsync.models.unet import UNet3DConditionModel
20
+ from latentsync.pipelines.lipsync_pipeline import LipsyncPipeline
21
+ from diffusers.utils.import_utils import is_xformers_available
22
+ from accelerate.utils import set_seed
23
+ from latentsync.whisper.audio2feature import Audio2Feature
24
+
25
+
26
+ def main(config, args):
27
+ # Check if the GPU supports float16
28
+ is_fp16_supported = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] > 7
29
+ dtype = torch.float16 if is_fp16_supported else torch.float32
30
+
31
+ print(f"Input video path: {args.video_path}")
32
+ print(f"Input audio path: {args.audio_path}")
33
+ print(f"Loaded checkpoint path: {args.inference_ckpt_path}")
34
+
35
+ scheduler = DDIMScheduler.from_pretrained("configs")
36
+
37
+ if config.model.cross_attention_dim == 768:
38
+ whisper_model_path = "checkpoints/whisper/small.pt"
39
+ elif config.model.cross_attention_dim == 384:
40
+ whisper_model_path = "checkpoints/whisper/tiny.pt"
41
+ else:
42
+ raise NotImplementedError("cross_attention_dim must be 768 or 384")
43
+
44
+ audio_encoder = Audio2Feature(model_path=whisper_model_path, device="cuda", num_frames=config.data.num_frames)
45
+
46
+ vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=dtype)
47
+ vae.config.scaling_factor = 0.18215
48
+ vae.config.shift_factor = 0
49
+
50
+ unet, _ = UNet3DConditionModel.from_pretrained(
51
+ OmegaConf.to_container(config.model),
52
+ args.inference_ckpt_path, # load checkpoint
53
+ device="cpu",
54
+ )
55
+
56
+ unet = unet.to(dtype=dtype)
57
+
58
+ # set xformers
59
+ if is_xformers_available():
60
+ unet.enable_xformers_memory_efficient_attention()
61
+
62
+ pipeline = LipsyncPipeline(
63
+ vae=vae,
64
+ audio_encoder=audio_encoder,
65
+ unet=unet,
66
+ scheduler=scheduler,
67
+ ).to("cuda")
68
+
69
+ if args.seed != -1:
70
+ set_seed(args.seed)
71
+ else:
72
+ torch.seed()
73
+
74
+ print(f"Initial seed: {torch.initial_seed()}")
75
+
76
+ pipeline(
77
+ video_path=args.video_path,
78
+ audio_path=args.audio_path,
79
+ video_out_path=args.video_out_path,
80
+ video_mask_path=args.video_out_path.replace(".mp4", "_mask.mp4"),
81
+ num_frames=config.data.num_frames,
82
+ num_inference_steps=config.run.inference_steps,
83
+ guidance_scale=args.guidance_scale,
84
+ weight_dtype=dtype,
85
+ width=config.data.resolution,
86
+ height=config.data.resolution,
87
+ )
88
+
89
+
90
+ if __name__ == "__main__":
91
+ parser = argparse.ArgumentParser()
92
+ parser.add_argument("--unet_config_path", type=str, default="configs/unet.yaml")
93
+ parser.add_argument("--inference_ckpt_path", type=str, required=True)
94
+ parser.add_argument("--video_path", type=str, required=True)
95
+ parser.add_argument("--audio_path", type=str, required=True)
96
+ parser.add_argument("--video_out_path", type=str, required=True)
97
+ parser.add_argument("--guidance_scale", type=float, default=1.0)
98
+ parser.add_argument("--seed", type=int, default=1247)
99
+ args = parser.parse_args()
100
+
101
+ config = OmegaConf.load(args.unet_config_path)
102
+
103
+ main(config, args)
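
Note on the dtype selection in main above: float16 is only enabled when the GPU reports a compute capability above 7 (i.e. 8.0 or newer, such as Ampere); fp16-capable but older cards such as a T4 (7.5) fall back to float32 under this check. A small standalone illustration (device examples are mine, not from the repo):

    import torch

    if torch.cuda.is_available():
        major, minor = torch.cuda.get_device_capability()
    else:
        major, minor = 0, 0
    # e.g. an A100 reports (8, 0) -> float16; a T4 reports (7, 5) -> float32 under this check
    dtype = torch.float16 if major > 7 else torch.float32
    print(f"compute capability {major}.{minor} -> {dtype}")
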
scripts/train_syncnet.py ADDED
@@ -0,0 +1,336 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from tqdm.auto import tqdm
16
+ import os, argparse, datetime, math
17
+ import logging
18
+ from omegaconf import OmegaConf
19
+ import shutil
20
+
21
+ from latentsync.data.syncnet_dataset import SyncNetDataset
22
+ from latentsync.models.syncnet import SyncNet
23
+ from latentsync.models.syncnet_wav2lip import SyncNetWav2Lip
24
+ from latentsync.utils.util import gather_loss, plot_loss_chart
25
+ from accelerate.utils import set_seed
26
+
27
+ import torch
28
+ from diffusers import AutoencoderKL
29
+ from diffusers.utils.logging import get_logger
30
+ from einops import rearrange
31
+ import torch.distributed as dist
32
+ from torch.nn.parallel import DistributedDataParallel as DDP
33
+ from torch.utils.data.distributed import DistributedSampler
34
+ from latentsync.utils.util import init_dist, cosine_loss
35
+
36
+ logger = get_logger(__name__)
37
+
38
+
39
+ def main(config):
40
+ # Initialize distributed training
41
+ local_rank = init_dist()
42
+ global_rank = dist.get_rank()
43
+ num_processes = dist.get_world_size()
44
+ is_main_process = global_rank == 0
45
+
46
+ seed = config.run.seed + global_rank
47
+ set_seed(seed)
48
+
49
+ # Logging folder
50
+ folder_name = "train" + datetime.datetime.now().strftime(f"-%Y_%m_%d-%H:%M:%S")
51
+ output_dir = os.path.join(config.data.train_output_dir, folder_name)
52
+
53
+ # Make one log on every process with the configuration for debugging.
54
+ logging.basicConfig(
55
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
56
+ datefmt="%m/%d/%Y %H:%M:%S",
57
+ level=logging.INFO,
58
+ )
59
+
60
+ # Handle the output folder creation
61
+ if is_main_process:
62
+ os.makedirs(output_dir, exist_ok=True)
63
+ os.makedirs(f"{output_dir}/checkpoints", exist_ok=True)
64
+ os.makedirs(f"{output_dir}/loss_charts", exist_ok=True)
65
+ shutil.copy(config.config_path, output_dir)
66
+
67
+ device = torch.device(local_rank)
68
+
69
+ if config.data.latent_space:
70
+ vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
71
+ vae.requires_grad_(False)
72
+ vae.to(device)
73
+ else:
74
+ vae = None
75
+
76
+ # Dataset and Dataloader setup
77
+ train_dataset = SyncNetDataset(config.data.train_data_dir, config.data.train_fileslist, config)
78
+ val_dataset = SyncNetDataset(config.data.val_data_dir, config.data.val_fileslist, config)
79
+
80
+ train_distributed_sampler = DistributedSampler(
81
+ train_dataset,
82
+ num_replicas=num_processes,
83
+ rank=global_rank,
84
+ shuffle=True,
85
+ seed=config.run.seed,
86
+ )
87
+
88
+ # DataLoaders creation:
89
+ train_dataloader = torch.utils.data.DataLoader(
90
+ train_dataset,
91
+ batch_size=config.data.batch_size,
92
+ shuffle=False,
93
+ sampler=train_distributed_sampler,
94
+ num_workers=config.data.num_workers,
95
+ pin_memory=False,
96
+ drop_last=True,
97
+ worker_init_fn=train_dataset.worker_init_fn,
98
+ )
99
+
100
+ num_samples_limit = 640
101
+
102
+ val_batch_size = min(
103
+ num_samples_limit // config.data.num_frames, config.data.batch_size
104
+ ) # limit batch size to avoid CUDA OOM
105
+
106
+ val_dataloader = torch.utils.data.DataLoader(
107
+ val_dataset,
108
+ batch_size=val_batch_size,
109
+ shuffle=False,
110
+ num_workers=config.data.num_workers,
111
+ pin_memory=False,
112
+ drop_last=False,
113
+ worker_init_fn=val_dataset.worker_init_fn,
114
+ )
115
+
116
+ # Model
117
+ syncnet = SyncNet(OmegaConf.to_container(config.model)).to(device)
118
+ # syncnet = SyncNetWav2Lip().to(device)
119
+
120
+ optimizer = torch.optim.AdamW(
121
+ list(filter(lambda p: p.requires_grad, syncnet.parameters())), lr=config.optimizer.lr
122
+ )
123
+
124
+ if config.ckpt.resume_ckpt_path != "":
125
+ if is_main_process:
126
+ logger.info(f"Load checkpoint from: {config.ckpt.resume_ckpt_path}")
127
+ ckpt = torch.load(config.ckpt.resume_ckpt_path, map_location=device)
128
+
129
+ syncnet.load_state_dict(ckpt["state_dict"])
130
+ global_step = ckpt["global_step"]
131
+ train_step_list = ckpt["train_step_list"]
132
+ train_loss_list = ckpt["train_loss_list"]
133
+ val_step_list = ckpt["val_step_list"]
134
+ val_loss_list = ckpt["val_loss_list"]
135
+ else:
136
+ global_step = 0
137
+ train_step_list = []
138
+ train_loss_list = []
139
+ val_step_list = []
140
+ val_loss_list = []
141
+
142
+ # DDP wrapper
143
+ syncnet = DDP(syncnet, device_ids=[local_rank], output_device=local_rank)
144
+
145
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader))
146
+ num_train_epochs = math.ceil(config.run.max_train_steps / num_update_steps_per_epoch)
147
+ # validation_steps = int(config.ckpt.save_ckpt_steps // 5)
148
+ # validation_steps = 100
149
+
150
+ if is_main_process:
151
+ logger.info("***** Running training *****")
152
+ logger.info(f" Num examples = {len(train_dataset)}")
153
+ logger.info(f" Num Epochs = {num_train_epochs}")
154
+ logger.info(f" Instantaneous batch size per device = {config.data.batch_size}")
155
+ logger.info(f" Total train batch size (w. parallel & distributed) = {config.data.batch_size * num_processes}")
156
+ logger.info(f" Total optimization steps = {config.run.max_train_steps}")
157
+
158
+ first_epoch = global_step // num_update_steps_per_epoch
159
+ num_val_batches = config.data.num_val_samples // (num_processes * config.data.batch_size)
160
+
161
+ # Only show the progress bar once on each machine.
162
+ progress_bar = tqdm(
163
+ range(0, config.run.max_train_steps), initial=global_step, desc="Steps", disable=not is_main_process
164
+ )
165
+
166
+ # Support mixed-precision training
167
+ scaler = torch.cuda.amp.GradScaler() if config.run.mixed_precision_training else None
168
+
169
+ for epoch in range(first_epoch, num_train_epochs):
170
+ train_dataloader.sampler.set_epoch(epoch)
171
+ syncnet.train()
172
+
173
+ for step, batch in enumerate(train_dataloader):
174
+ ### >>>> Training >>>> ###
175
+
176
+ frames = batch["frames"].to(device, dtype=torch.float16)
177
+ audio_samples = batch["audio_samples"].to(device, dtype=torch.float16)
178
+ y = batch["y"].to(device, dtype=torch.float32)
179
+
180
+ if config.data.latent_space:
181
+ max_batch_size = (
182
+ num_samples_limit // config.data.num_frames
183
+ ) # due to the limited cuda memory, we split the input frames into parts
184
+ if frames.shape[0] > max_batch_size:
185
+ assert (
186
+ frames.shape[0] % max_batch_size == 0
187
+ ), f"max_batch_size {max_batch_size} should be divisible by batch_size {frames.shape[0]}"
188
+ frames_part_results = []
189
+ for i in range(0, frames.shape[0], max_batch_size):
190
+ frames_part = frames[i : i + max_batch_size]
191
+ frames_part = rearrange(frames_part, "b f c h w -> (b f) c h w")
192
+ with torch.no_grad():
193
+ frames_part = vae.encode(frames_part).latent_dist.sample() * 0.18215
194
+ frames_part_results.append(frames_part)
195
+ frames = torch.cat(frames_part_results, dim=0)
196
+ else:
197
+ frames = rearrange(frames, "b f c h w -> (b f) c h w")
198
+ with torch.no_grad():
199
+ frames = vae.encode(frames).latent_dist.sample() * 0.18215
200
+
201
+ frames = rearrange(frames, "(b f) c h w -> b (f c) h w", f=config.data.num_frames)
202
+ else:
203
+ frames = rearrange(frames, "b f c h w -> b (f c) h w")
204
+
205
+ if config.data.lower_half:
206
+ height = frames.shape[2]
207
+ frames = frames[:, :, height // 2 :, :]
208
+
209
+ # audio_embeds = wav2vec_encoder(audio_samples).last_hidden_state
210
+
211
+ # Mixed-precision training
212
+ with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=config.run.mixed_precision_training):
213
+ vision_embeds, audio_embeds = syncnet(frames, audio_samples)
214
+
215
+ loss = cosine_loss(vision_embeds.float(), audio_embeds.float(), y).mean()
216
+
217
+ optimizer.zero_grad()
218
+
219
+ # Backpropagate
220
+ if config.run.mixed_precision_training:
221
+ scaler.scale(loss).backward()
222
+ """ >>> gradient clipping >>> """
223
+ scaler.unscale_(optimizer)
224
+ torch.nn.utils.clip_grad_norm_(syncnet.parameters(), config.optimizer.max_grad_norm)
225
+ """ <<< gradient clipping <<< """
226
+ scaler.step(optimizer)
227
+ scaler.update()
228
+ else:
229
+ loss.backward()
230
+ """ >>> gradient clipping >>> """
231
+ torch.nn.utils.clip_grad_norm_(syncnet.parameters(), config.optimizer.max_grad_norm)
232
+ """ <<< gradient clipping <<< """
233
+ optimizer.step()
234
+
235
+ progress_bar.update(1)
236
+ global_step += 1
237
+
238
+ global_average_loss = gather_loss(loss, device)
239
+ train_step_list.append(global_step)
240
+ train_loss_list.append(global_average_loss)
241
+
242
+ if is_main_process and global_step % config.run.validation_steps == 0:
243
+ logger.info(f"Validation at step {global_step}")
244
+ val_loss = validation(
245
+ val_dataloader,
246
+ device,
247
+ syncnet,
248
+ cosine_loss,
249
+ config.data.latent_space,
250
+ config.data.lower_half,
251
+ vae,
252
+ num_val_batches,
253
+ )
254
+ val_step_list.append(global_step)
255
+ val_loss_list.append(val_loss)
256
+ logger.info(f"Validation loss at step {global_step} is {val_loss:0.3f}")
257
+
258
+ if is_main_process and global_step % config.ckpt.save_ckpt_steps == 0:
259
+ checkpoint_save_path = os.path.join(output_dir, f"checkpoints/checkpoint-{global_step}.pt")
260
+ torch.save(
261
+ {
262
+ "state_dict": syncnet.module.state_dict(), # to unwrap DDP
263
+ "global_step": global_step,
264
+ "train_step_list": train_step_list,
265
+ "train_loss_list": train_loss_list,
266
+ "val_step_list": val_step_list,
267
+ "val_loss_list": val_loss_list,
268
+ },
269
+ checkpoint_save_path,
270
+ )
271
+ logger.info(f"Saved checkpoint to {checkpoint_save_path}")
272
+ plot_loss_chart(
273
+ os.path.join(output_dir, f"loss_charts/loss_chart-{global_step}.png"),
274
+ ("Train loss", train_step_list, train_loss_list),
275
+ ("Val loss", val_step_list, val_loss_list),
276
+ )
277
+
278
+ progress_bar.set_postfix({"step_loss": global_average_loss})
279
+ if global_step >= config.run.max_train_steps:
280
+ break
281
+
282
+ progress_bar.close()
283
+ dist.destroy_process_group()
284
+
285
+
286
+ @torch.no_grad()
287
+ def validation(val_dataloader, device, syncnet, cosine_loss, latent_space, lower_half, vae, num_val_batches):
288
+ syncnet.eval()
289
+
290
+ losses = []
291
+ val_step = 0
292
+ while True:
293
+ for step, batch in enumerate(val_dataloader):
294
+ ### >>>> Validation >>>> ###
295
+
296
+ frames = batch["frames"].to(device, dtype=torch.float16)
297
+ audio_samples = batch["audio_samples"].to(device, dtype=torch.float16)
298
+ y = batch["y"].to(device, dtype=torch.float32)
299
+
300
+ if latent_space:
301
+ num_frames = frames.shape[1]
302
+ frames = rearrange(frames, "b f c h w -> (b f) c h w")
303
+ frames = vae.encode(frames).latent_dist.sample() * 0.18215
304
+ frames = rearrange(frames, "(b f) c h w -> b (f c) h w", f=num_frames)
305
+ else:
306
+ frames = rearrange(frames, "b f c h w -> b (f c) h w")
307
+
308
+ if lower_half:
309
+ height = frames.shape[2]
310
+ frames = frames[:, :, height // 2 :, :]
311
+
312
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
313
+ vision_embeds, audio_embeds = syncnet(frames, audio_samples)
314
+
315
+ loss = cosine_loss(vision_embeds.float(), audio_embeds.float(), y).mean()
316
+
317
+ losses.append(loss.item())
318
+
319
+ val_step += 1
320
+ if val_step > num_val_batches:
321
+ syncnet.train()
322
+ if len(losses) == 0:
323
+ raise RuntimeError("No validation data")
324
+ return sum(losses) / len(losses)
325
+
326
+
327
+ if __name__ == "__main__":
328
+ parser = argparse.ArgumentParser(description="Code to train the expert lip-sync discriminator")
329
+ parser.add_argument("--config_path", type=str, default="configs/syncnet/syncnet_16_vae.yaml")
330
+ args = parser.parse_args()
331
+
332
+ # Load a configuration file
333
+ config = OmegaConf.load(args.config_path)
334
+ config.config_path = args.config_path
335
+
336
+ main(config)
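
Note on the loss used above: both the training and validation loops compute a per-sample cosine_loss over vision and audio embeddings with targets y equal to 1 for in-sync and 0 for out-of-sync pairs, then take the mean. cosine_loss itself is imported from latentsync.utils.util and is not part of this diff; the sketch below is only an assumption of the usual Wav2Lip-style formulation, not the repository's implementation:

    import torch
    import torch.nn.functional as F

    def cosine_loss_sketch(vision_embeds, audio_embeds, y):
        # Assumed shapes: embeddings (B, D), y (B, 1) with 1.0 = in-sync, 0.0 = out-of-sync
        sim = F.cosine_similarity(vision_embeds, audio_embeds, dim=-1)        # (B,)
        prob = (sim + 1) / 2                                                  # map [-1, 1] to [0, 1]
        return F.binary_cross_entropy(prob, y.squeeze(-1), reduction="none")  # caller applies .mean()
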
scripts/train_unet.py ADDED
@@ -0,0 +1,510 @@
1
+ # Copyright (c) 2024 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import math
17
+ import argparse
18
+ import shutil
19
+ import datetime
20
+ import logging
21
+ from omegaconf import OmegaConf
22
+
23
+ from tqdm.auto import tqdm
24
+ from einops import rearrange
25
+
26
+ import torch
27
+ import torch.nn.functional as F
28
+ import torch.distributed as dist
29
+ from torch.utils.data.distributed import DistributedSampler
30
+ from torch.nn.parallel import DistributedDataParallel as DDP
31
+
32
+ import diffusers
33
+ from diffusers import AutoencoderKL, DDIMScheduler
34
+ from diffusers.utils.logging import get_logger
35
+ from diffusers.optimization import get_scheduler
36
+ from diffusers.utils.import_utils import is_xformers_available
37
+ from accelerate.utils import set_seed
38
+
39
+ from latentsync.data.unet_dataset import UNetDataset
40
+ from latentsync.models.unet import UNet3DConditionModel
41
+ from latentsync.models.syncnet import SyncNet
42
+ from latentsync.pipelines.lipsync_pipeline import LipsyncPipeline
43
+ from latentsync.utils.util import (
44
+ init_dist,
45
+ cosine_loss,
46
+ reversed_forward,
47
+ )
48
+ from latentsync.utils.util import plot_loss_chart, gather_loss
49
+ from latentsync.whisper.audio2feature import Audio2Feature
50
+ from latentsync.trepa import TREPALoss
51
+ from eval.syncnet import SyncNetEval
52
+ from eval.syncnet_detect import SyncNetDetector
53
+ from eval.eval_sync_conf import syncnet_eval
54
+ import lpips
55
+
56
+
57
+ logger = get_logger(__name__)
58
+
59
+
60
+ def main(config):
61
+ # Initialize distributed training
62
+ local_rank = init_dist()
63
+ global_rank = dist.get_rank()
64
+ num_processes = dist.get_world_size()
65
+ is_main_process = global_rank == 0
66
+
67
+ seed = config.run.seed + global_rank
68
+ set_seed(seed)
69
+
70
+ # Logging folder
71
+ folder_name = "train" + datetime.datetime.now().strftime(f"-%Y_%m_%d-%H:%M:%S")
72
+ output_dir = os.path.join(config.data.train_output_dir, folder_name)
73
+
74
+ # Make one log on every process with the configuration for debugging.
75
+ logging.basicConfig(
76
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
77
+ datefmt="%m/%d/%Y %H:%M:%S",
78
+ level=logging.INFO,
79
+ )
80
+
81
+ # Handle the output folder creation
82
+ if is_main_process:
83
+ diffusers.utils.logging.set_verbosity_info()
84
+ os.makedirs(output_dir, exist_ok=True)
85
+ os.makedirs(f"{output_dir}/checkpoints", exist_ok=True)
86
+ os.makedirs(f"{output_dir}/val_videos", exist_ok=True)
87
+ os.makedirs(f"{output_dir}/loss_charts", exist_ok=True)
88
+ shutil.copy(config.unet_config_path, output_dir)
89
+ shutil.copy(config.data.syncnet_config_path, output_dir)
90
+
91
+ device = torch.device(local_rank)
92
+
93
+ noise_scheduler = DDIMScheduler.from_pretrained("configs")
94
+
95
+ vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
96
+ vae.config.scaling_factor = 0.18215
97
+ vae.config.shift_factor = 0
98
+ vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1)
99
+ vae.requires_grad_(False)
100
+ vae.to(device)
101
+
102
+ syncnet_eval_model = SyncNetEval(device=device)
103
+ syncnet_eval_model.loadParameters("checkpoints/auxiliary/syncnet_v2.model")
104
+
105
+ syncnet_detector = SyncNetDetector(device=device, detect_results_dir="detect_results")
106
+
107
+ if config.model.cross_attention_dim == 768:
108
+ whisper_model_path = "checkpoints/whisper/small.pt"
109
+ elif config.model.cross_attention_dim == 384:
110
+ whisper_model_path = "checkpoints/whisper/tiny.pt"
111
+ else:
112
+ raise NotImplementedError("cross_attention_dim must be 768 or 384")
113
+
114
+ audio_encoder = Audio2Feature(
115
+ model_path=whisper_model_path,
116
+ device=device,
117
+ audio_embeds_cache_dir=config.data.audio_embeds_cache_dir,
118
+ num_frames=config.data.num_frames,
119
+ )
120
+
121
+ unet, resume_global_step = UNet3DConditionModel.from_pretrained(
122
+ OmegaConf.to_container(config.model),
123
+ config.ckpt.resume_ckpt_path, # load checkpoint
124
+ device=device,
125
+ )
126
+
127
+ if config.model.add_audio_layer and config.run.use_syncnet:
128
+ syncnet_config = OmegaConf.load(config.data.syncnet_config_path)
129
+ if syncnet_config.ckpt.inference_ckpt_path == "":
130
+ raise ValueError("SyncNet path is not provided")
131
+ syncnet = SyncNet(OmegaConf.to_container(syncnet_config.model)).to(device=device, dtype=torch.float16)
132
+ syncnet_checkpoint = torch.load(syncnet_config.ckpt.inference_ckpt_path, map_location=device)
133
+ syncnet.load_state_dict(syncnet_checkpoint["state_dict"])
134
+ syncnet.requires_grad_(False)
135
+
136
+ unet.requires_grad_(True)
137
+ trainable_params = list(unet.parameters())
138
+
139
+ if config.optimizer.scale_lr:
140
+ config.optimizer.lr = config.optimizer.lr * num_processes
141
+
142
+ optimizer = torch.optim.AdamW(trainable_params, lr=config.optimizer.lr)
143
+
144
+ if is_main_process:
145
+ logger.info(f"trainable params number: {len(trainable_params)}")
146
+ logger.info(f"trainable params scale: {sum(p.numel() for p in trainable_params) / 1e6:.3f} M")
147
+
148
+ # Enable xformers
149
+ if config.run.enable_xformers_memory_efficient_attention:
150
+ if is_xformers_available():
151
+ unet.enable_xformers_memory_efficient_attention()
152
+ else:
153
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
154
+
155
+ # Enable gradient checkpointing
156
+ if config.run.enable_gradient_checkpointing:
157
+ unet.enable_gradient_checkpointing()
158
+
159
+ # Get the training dataset
160
+ train_dataset = UNetDataset(config.data.train_data_dir, config)
161
+ distributed_sampler = DistributedSampler(
162
+ train_dataset,
163
+ num_replicas=num_processes,
164
+ rank=global_rank,
165
+ shuffle=True,
166
+ seed=config.run.seed,
167
+ )
168
+
169
+ # DataLoaders creation:
170
+ train_dataloader = torch.utils.data.DataLoader(
171
+ train_dataset,
172
+ batch_size=config.data.batch_size,
173
+ shuffle=False,
174
+ sampler=distributed_sampler,
175
+ num_workers=config.data.num_workers,
176
+ pin_memory=False,
177
+ drop_last=True,
178
+ worker_init_fn=train_dataset.worker_init_fn,
179
+ )
180
+
181
+ # Get the training iteration
182
+ if config.run.max_train_steps == -1:
183
+ assert config.run.max_train_epochs != -1
184
+ config.run.max_train_steps = config.run.max_train_epochs * len(train_dataloader)
185
+
186
+ # Scheduler
187
+ lr_scheduler = get_scheduler(
188
+ config.optimizer.lr_scheduler,
189
+ optimizer=optimizer,
190
+ num_warmup_steps=config.optimizer.lr_warmup_steps,
191
+ num_training_steps=config.run.max_train_steps,
192
+ )
193
+
194
+ if config.run.perceptual_loss_weight != 0 and config.run.pixel_space_supervise:
195
+ lpips_loss_func = lpips.LPIPS(net="vgg").to(device)
196
+
197
+ if config.run.trepa_loss_weight != 0 and config.run.pixel_space_supervise:
198
+ trepa_loss_func = TREPALoss(device=device)
199
+
200
+ # Validation pipeline
201
+ pipeline = LipsyncPipeline(
202
+ vae=vae,
203
+ audio_encoder=audio_encoder,
204
+ unet=unet,
205
+ scheduler=noise_scheduler,
206
+ ).to(device)
207
+ pipeline.set_progress_bar_config(disable=True)
208
+
209
+ # DDP warpper
210
+ unet = DDP(unet, device_ids=[local_rank], output_device=local_rank)
211
+
212
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
213
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader))
214
+ # Afterwards we recalculate our number of training epochs
215
+ num_train_epochs = math.ceil(config.run.max_train_steps / num_update_steps_per_epoch)
216
+
217
+ # Train!
218
+ total_batch_size = config.data.batch_size * num_processes
219
+
220
+ if is_main_process:
221
+ logger.info("***** Running training *****")
222
+ logger.info(f" Num examples = {len(train_dataset)}")
223
+ logger.info(f" Num Epochs = {num_train_epochs}")
224
+ logger.info(f" Instantaneous batch size per device = {config.data.batch_size}")
225
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
226
+ logger.info(f" Total optimization steps = {config.run.max_train_steps}")
227
+ global_step = resume_global_step
228
+ first_epoch = resume_global_step // num_update_steps_per_epoch
229
+
230
+ # Only show the progress bar once on each machine.
231
+ progress_bar = tqdm(
232
+ range(0, config.run.max_train_steps),
233
+ initial=resume_global_step,
234
+ desc="Steps",
235
+ disable=not is_main_process,
236
+ )
237
+
238
+ train_step_list = []
239
+ sync_loss_list = []
240
+ recon_loss_list = []
241
+
242
+ val_step_list = []
243
+ sync_conf_list = []
244
+
245
+ # Support mixed-precision training
246
+ scaler = torch.cuda.amp.GradScaler() if config.run.mixed_precision_training else None
247
+
248
+ for epoch in range(first_epoch, num_train_epochs):
249
+ train_dataloader.sampler.set_epoch(epoch)
250
+ unet.train()
251
+
252
+ for step, batch in enumerate(train_dataloader):
253
+ ### >>>> Training >>>> ###
254
+
255
+ if config.model.add_audio_layer:
256
+ if batch["mel"] != []:
257
+ mel = batch["mel"].to(device, dtype=torch.float16)
258
+
259
+ audio_embeds_list = []
260
+ try:
261
+ for idx in range(len(batch["video_path"])):
262
+ video_path = batch["video_path"][idx]
263
+ start_idx = batch["start_idx"][idx]
264
+
265
+ with torch.no_grad():
266
+ audio_feat = audio_encoder.audio2feat(video_path)
267
+ audio_embeds = audio_encoder.crop_overlap_audio_window(audio_feat, start_idx)
268
+ audio_embeds_list.append(audio_embeds)
269
+ except Exception as e:
270
+ logger.info(f"{type(e).__name__} - {e} - {video_path}")
271
+ continue
272
+ audio_embeds = torch.stack(audio_embeds_list) # (B, 16, 50, 384)
273
+ audio_embeds = audio_embeds.to(device, dtype=torch.float16)
274
+ else:
275
+ audio_embeds = None
276
+
277
+ # Convert videos to latent space
278
+ gt_images = batch["gt"].to(device, dtype=torch.float16)
279
+ gt_masked_images = batch["masked_gt"].to(device, dtype=torch.float16)
280
+ mask = batch["mask"].to(device, dtype=torch.float16)
281
+ ref_images = batch["ref"].to(device, dtype=torch.float16)
282
+
283
+ gt_images = rearrange(gt_images, "b f c h w -> (b f) c h w")
284
+ gt_masked_images = rearrange(gt_masked_images, "b f c h w -> (b f) c h w")
285
+ mask = rearrange(mask, "b f c h w -> (b f) c h w")
286
+ ref_images = rearrange(ref_images, "b f c h w -> (b f) c h w")
287
+
288
+ with torch.no_grad():
289
+ gt_latents = vae.encode(gt_images).latent_dist.sample()
290
+ gt_masked_images = vae.encode(gt_masked_images).latent_dist.sample()
291
+ ref_images = vae.encode(ref_images).latent_dist.sample()
292
+
293
+ mask = torch.nn.functional.interpolate(mask, size=config.data.resolution // vae_scale_factor)
294
+
295
+ gt_latents = (
296
+ rearrange(gt_latents, "(b f) c h w -> b c f h w", f=config.data.num_frames) - vae.config.shift_factor
297
+ ) * vae.config.scaling_factor
298
+ gt_masked_images = (
299
+ rearrange(gt_masked_images, "(b f) c h w -> b c f h w", f=config.data.num_frames)
300
+ - vae.config.shift_factor
301
+ ) * vae.config.scaling_factor
302
+ ref_images = (
303
+ rearrange(ref_images, "(b f) c h w -> b c f h w", f=config.data.num_frames) - vae.config.shift_factor
304
+ ) * vae.config.scaling_factor
305
+ mask = rearrange(mask, "(b f) c h w -> b c f h w", f=config.data.num_frames)
306
+
307
+ # Sample noise that we'll add to the latents
308
+ if config.run.use_mixed_noise:
309
+ # Refer to the paper: https://arxiv.org/abs/2305.10474
310
+ noise_shared_std_dev = (config.run.mixed_noise_alpha**2 / (1 + config.run.mixed_noise_alpha**2)) ** 0.5
311
+ noise_shared = torch.randn_like(gt_latents) * noise_shared_std_dev
312
+ noise_shared = noise_shared[:, :, 0:1].repeat(1, 1, config.data.num_frames, 1, 1)
313
+
314
+ noise_ind_std_dev = (1 / (1 + config.run.mixed_noise_alpha**2)) ** 0.5
315
+ noise_ind = torch.randn_like(gt_latents) * noise_ind_std_dev
316
+ noise = noise_ind + noise_shared
317
+ else:
318
+ noise = torch.randn_like(gt_latents)
319
+ noise = noise[:, :, 0:1].repeat(
320
+ 1, 1, config.data.num_frames, 1, 1
321
+ ) # Using the same noise for all frames, refer to the paper: https://arxiv.org/abs/2308.09716
322
+
323
+ bsz = gt_latents.shape[0]
324
+
325
+ # Sample a random timestep for each video
326
+ timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=gt_latents.device)
327
+ timesteps = timesteps.long()
328
+
329
+ # Add noise to the latents according to the noise magnitude at each timestep
330
+ # (this is the forward diffusion process)
331
+ noisy_tensor = noise_scheduler.add_noise(gt_latents, noise, timesteps)
332
+
333
+ # Get the target for loss depending on the prediction type
334
+ if noise_scheduler.config.prediction_type == "epsilon":
335
+ target = noise
336
+ elif noise_scheduler.config.prediction_type == "v_prediction":
337
+ raise NotImplementedError
338
+ else:
339
+ raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
340
+
341
+ unet_input = torch.cat([noisy_tensor, mask, gt_masked_images, ref_images], dim=1)
342
+
343
+ # Predict the noise and compute loss
344
+ # Mixed-precision training
345
+ with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=config.run.mixed_precision_training):
346
+ pred_noise = unet(unet_input, timesteps, encoder_hidden_states=audio_embeds).sample
347
+
348
+ if config.run.recon_loss_weight != 0:
349
+ recon_loss = F.mse_loss(pred_noise.float(), target.float(), reduction="mean")
350
+ else:
351
+ recon_loss = 0
352
+
353
+ pred_latents = reversed_forward(noise_scheduler, pred_noise, timesteps, noisy_tensor)
354
+
355
+ if config.run.pixel_space_supervise:
356
+ pred_images = vae.decode(
357
+ rearrange(pred_latents, "b c f h w -> (b f) c h w") / vae.config.scaling_factor
358
+ + vae.config.shift_factor
359
+ ).sample
360
+
361
+ if config.run.perceptual_loss_weight != 0 and config.run.pixel_space_supervise:
362
+ pred_images_perceptual = pred_images[:, :, pred_images.shape[2] // 2 :, :]
363
+ gt_images_perceptual = gt_images[:, :, gt_images.shape[2] // 2 :, :]
364
+ lpips_loss = lpips_loss_func(pred_images_perceptual.float(), gt_images_perceptual.float()).mean()
365
+ else:
366
+ lpips_loss = 0
367
+
368
+ if config.run.trepa_loss_weight != 0 and config.run.pixel_space_supervise:
369
+ trepa_pred_images = rearrange(pred_images, "(b f) c h w -> b c f h w", f=config.data.num_frames)
370
+ trepa_gt_images = rearrange(gt_images, "(b f) c h w -> b c f h w", f=config.data.num_frames)
371
+ trepa_loss = trepa_loss_func(trepa_pred_images, trepa_gt_images)
372
+ else:
373
+ trepa_loss = 0
374
+
375
+ if config.model.add_audio_layer and config.run.use_syncnet:
376
+ if config.run.pixel_space_supervise:
377
+ syncnet_input = rearrange(pred_images, "(b f) c h w -> b (f c) h w", f=config.data.num_frames)
378
+ else:
379
+ syncnet_input = rearrange(pred_latents, "b c f h w -> b (f c) h w")
380
+
381
+ if syncnet_config.data.lower_half:
382
+ height = syncnet_input.shape[2]
383
+ syncnet_input = syncnet_input[:, :, height // 2 :, :]
384
+ ones_tensor = torch.ones((config.data.batch_size, 1)).float().to(device=device)
385
+ vision_embeds, audio_embeds = syncnet(syncnet_input, mel)
386
+ sync_loss = cosine_loss(vision_embeds.float(), audio_embeds.float(), ones_tensor).mean()
387
+ sync_loss_list.append(gather_loss(sync_loss, device))
388
+ else:
389
+ sync_loss = 0
390
+
391
+ loss = (
392
+ recon_loss * config.run.recon_loss_weight
393
+ + sync_loss * config.run.sync_loss_weight
394
+ + lpips_loss * config.run.perceptual_loss_weight
395
+ + trepa_loss * config.run.trepa_loss_weight
396
+ )
397
+
398
+ train_step_list.append(global_step)
399
+ if config.run.recon_loss_weight != 0:
400
+ recon_loss_list.append(gather_loss(recon_loss, device))
401
+
402
+ optimizer.zero_grad()
403
+
404
+ # Backpropagate
405
+ if config.run.mixed_precision_training:
406
+ scaler.scale(loss).backward()
407
+ """ >>> gradient clipping >>> """
408
+ scaler.unscale_(optimizer)
409
+ torch.nn.utils.clip_grad_norm_(unet.parameters(), config.optimizer.max_grad_norm)
410
+ """ <<< gradient clipping <<< """
411
+ scaler.step(optimizer)
412
+ scaler.update()
413
+ else:
414
+ loss.backward()
415
+ """ >>> gradient clipping >>> """
416
+ torch.nn.utils.clip_grad_norm_(unet.parameters(), config.optimizer.max_grad_norm)
417
+ """ <<< gradient clipping <<< """
418
+ optimizer.step()
419
+
420
+ # Check the grad of attn blocks for debugging
421
+ # print(unet.module.up_blocks[3].attentions[2].transformer_blocks[0].audio_cross_attn.attn.to_q.weight.grad)
422
+
423
+ lr_scheduler.step()
424
+ progress_bar.update(1)
425
+ global_step += 1
426
+
427
+ ### <<<< Training <<<< ###
428
+
429
+ # Save checkpoint and conduct validation
430
+ if is_main_process and (global_step % config.ckpt.save_ckpt_steps == 0):
431
+ if config.run.recon_loss_weight != 0:
432
+ plot_loss_chart(
433
+ os.path.join(output_dir, f"loss_charts/recon_loss_chart-{global_step}.png"),
434
+ ("Reconstruction loss", train_step_list, recon_loss_list),
435
+ )
436
+ if config.model.add_audio_layer:
437
+ if sync_loss_list != []:
438
+ plot_loss_chart(
439
+ os.path.join(output_dir, f"loss_charts/sync_loss_chart-{global_step}.png"),
440
+ ("Sync loss", train_step_list, sync_loss_list),
441
+ )
442
+ model_save_path = os.path.join(output_dir, f"checkpoints/checkpoint-{global_step}.pt")
443
+ state_dict = {
444
+ "global_step": global_step,
445
+ "state_dict": unet.module.state_dict(), # to unwrap DDP
446
+ }
447
+ try:
448
+ torch.save(state_dict, model_save_path)
449
+ logger.info(f"Saved checkpoint to {model_save_path}")
450
+ except Exception as e:
451
+ logger.error(f"Error saving model: {e}")
452
+
453
+ # Validation
454
+ logger.info("Running validation... ")
455
+
456
+ validation_video_out_path = os.path.join(output_dir, f"val_videos/val_video_{global_step}.mp4")
457
+ validation_video_mask_path = os.path.join(output_dir, f"val_videos/val_video_mask.mp4")
458
+
459
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
460
+ pipeline(
461
+ config.data.val_video_path,
462
+ config.data.val_audio_path,
463
+ validation_video_out_path,
464
+ validation_video_mask_path,
465
+ num_frames=config.data.num_frames,
466
+ num_inference_steps=config.run.inference_steps,
467
+ guidance_scale=config.run.guidance_scale,
468
+ weight_dtype=torch.float16,
469
+ width=config.data.resolution,
470
+ height=config.data.resolution,
471
+ mask=config.data.mask,
472
+ )
473
+
474
+ logger.info(f"Saved validation video output to {validation_video_out_path}")
475
+
476
+ val_step_list.append(global_step)
477
+
478
+ if config.model.add_audio_layer:
479
+ try:
480
+ _, conf = syncnet_eval(syncnet_eval_model, syncnet_detector, validation_video_out_path, "temp")
481
+ except Exception as e:
482
+ logger.info(e)
483
+ conf = 0
484
+ sync_conf_list.append(conf)
485
+ plot_loss_chart(
486
+ os.path.join(output_dir, f"loss_charts/sync_conf_chart-{global_step}.png"),
487
+ ("Sync confidence", val_step_list, sync_conf_list),
488
+ )
489
+
490
+ logs = {"step_loss": loss.item(), "lr": lr_scheduler.get_last_lr()[0]}
491
+ progress_bar.set_postfix(**logs)
492
+
493
+ if global_step >= config.run.max_train_steps:
494
+ break
495
+
496
+ progress_bar.close()
497
+ dist.destroy_process_group()
498
+
499
+
500
+ if __name__ == "__main__":
501
+ parser = argparse.ArgumentParser()
502
+
503
+ # Config file path
504
+ parser.add_argument("--unet_config_path", type=str, default="configs/unet.yaml")
505
+
506
+ args = parser.parse_args()
507
+ config = OmegaConf.load(args.unet_config_path)
508
+ config.unet_config_path = args.unet_config_path
509
+
510
+ main(config)
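
Note on reversed_forward used above: it is imported from latentsync.utils.util (not shown in this diff) and turns the predicted noise back into predicted clean latents so that pixel-space, LPIPS, TREPA and SyncNet losses can be computed on decoded frames. Under the epsilon parameterization x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, that inversion is x0_hat = (x_t - sqrt(1 - alpha_bar_t) * pred_noise) / sqrt(alpha_bar_t). A hedged sketch of that computation (the actual helper may differ):

    import torch

    def reversed_forward_sketch(noise_scheduler, pred_noise, timesteps, noisy_tensor):
        # Recover predicted clean latents from predicted noise (epsilon parameterization).
        alphas_cumprod = noise_scheduler.alphas_cumprod.to(noisy_tensor.device)
        alpha_bar = alphas_cumprod[timesteps].view(-1, 1, 1, 1, 1)  # broadcast over (b, c, f, h, w)
        return (noisy_tensor - (1 - alpha_bar).sqrt() * pred_noise) / alpha_bar.sqrt()
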