update
examples/nx_clean_unet/run.sh
CHANGED
@@ -12,7 +12,7 @@ sh run.sh --stage 3 --stop_stage 3 --system_version centos --file_folder_name fi
 --noise_dir "/data/tianxing/HuggingDatasets/nx_noise/data/noise" \
 --speech_dir "/data/tianxing/HuggingDatasets/aishell/data_aishell/wav/train"
 
-sh run.sh --stage
+sh run.sh --stage 1 --stop_stage 2 --system_version centos --file_folder_name file_dir --final_model_name nx-clean-unet-aishell-20250228 \
 --noise_dir "/data/tianxing/HuggingDatasets/nx_noise/data/noise" \
 --speech_dir "/data/tianxing/HuggingDatasets/aishell/data_aishell/wav/train" \
 --max_epochs 100
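The run.sh change replaces the previous documented example invocation with one that runs stages 1 through 2, names the exported model nx-clean-unet-aishell-20250228, and caps training at --max_epochs 100; the noise and clean-speech corpus paths are unchanged. As a rough sketch of how such flags are typically consumed by a Python training entry point (the argument wiring below is illustrative, not code from this repository):

# Rough sketch (assumption): the staged run.sh eventually hands these flags to a
# Python training entry point along these lines; the exact argument wiring is
# illustrative, not taken from this repository.
import argparse


def get_args():
    parser = argparse.ArgumentParser(description="nx_clean_unet training (sketch)")
    parser.add_argument("--noise_dir", required=True,
                        help="directory of noise wavs mixed into the clean speech")
    parser.add_argument("--speech_dir", required=True,
                        help="directory of clean speech wavs (AISHELL train split)")
    parser.add_argument("--final_model_name", default="nx-clean-unet-aishell-20250228")
    parser.add_argument("--max_epochs", type=int, default=100)
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    print(vars(args))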
toolbox/torchaudio/models/nx_clean_unet/modeling_nx_clean_unet.py
CHANGED
@@ -215,6 +215,9 @@ class NXCleanUNet(nn.Module):
         bottle_neck = torch.transpose(bottle_neck, dim0=-2, dim1=-1)
         # bottle_neck shape: [batch_size, time_steps, input_size]
 
+        bottle_neck = self.causal_encoder.forward(bottle_neck)
+        # bottle_neck shape: [batch_size, time_steps, input_size]
+
         bottle_neck = self.transformer.forward(bottle_neck)
         # bottle_neck shape: [batch_size, time_steps, input_size]
 
@@ -251,6 +254,9 @@ class NXCleanUNet(nn.Module):
         bottle_neck = torch.transpose(bottle_neck, dim0=-2, dim1=-1)
         # bottle_neck shape: [batch_size, time_steps, input_size]
 
+        bottle_neck = self.causal_encoder.forward_chunk_by_chunk(bottle_neck)
+        # bottle_neck shape: [batch_size, time_steps, input_size]
+
         bottle_neck = self.transformer.forward_chunk_by_chunk(bottle_neck)
         # bottle_neck shape: [batch_size, time_steps, input_size]
 
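The modeling change inserts a causal encoder into the bottleneck of NXCleanUNet, ahead of the existing transformer, in both the offline forward pass and the streaming forward_chunk_by_chunk pass; the tensor keeps its [batch_size, time_steps, input_size] shape throughout. The repository's causal encoder and transformer implementations are not shown in this diff, so the stand-in below is only a minimal sketch of the pattern, assuming a left-padded (causal) 1-D convolution: because it is causal, a chunk-by-chunk variant with a small left-context cache reproduces the offline output exactly.

# Illustrative sketch only. The repo's causal encoder and transformer are not
# shown in this diff; this stand-in demonstrates the pattern the commit adds:
# a causal encoder applied to the bottleneck, with a chunk-by-chunk variant
# for streaming that matches the offline forward pass.
import torch
import torch.nn as nn
import torch.nn.functional as F


class CausalEncoderStub(nn.Module):
    """Causal 1-D convolution over time: pad only on the left, so frame t never
    depends on frames later than t (stand-in for the repo's causal encoder)."""

    def __init__(self, input_size: int, kernel_size: int = 3):
        super().__init__()
        self.kernel_size = kernel_size
        self.conv = nn.Conv1d(input_size, input_size, kernel_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x shape: [batch_size, time_steps, input_size]
        x = x.transpose(-2, -1)                   # [batch_size, input_size, time_steps]
        x = F.pad(x, (self.kernel_size - 1, 0))   # left padding only => causal
        x = self.conv(x)
        return x.transpose(-2, -1)                # [batch_size, time_steps, input_size]

    def forward_chunk_by_chunk(self, x: torch.Tensor, chunk_size: int = 16) -> torch.Tensor:
        # Streaming variant: process fixed-size chunks, carrying the last
        # (kernel_size - 1) frames as left context for the next chunk.
        context = self.kernel_size - 1
        cache = x.new_zeros(x.shape[0], 0, x.shape[2])
        outputs = []
        for start in range(0, x.shape[1], chunk_size):
            chunk = x[:, start:start + chunk_size, :]
            padded = torch.cat([cache, chunk], dim=1)
            outputs.append(self.forward(padded)[:, cache.shape[1]:, :])
            cache = padded[:, -context:, :] if context > 0 else cache
        return torch.cat(outputs, dim=1)


if __name__ == "__main__":
    encoder = CausalEncoderStub(input_size=64)
    bottle_neck = torch.randn(2, 100, 64)        # [batch_size, time_steps, input_size]
    offline = encoder.forward(bottle_neck)
    streamed = encoder.forward_chunk_by_chunk(bottle_neck)
    print(offline.shape)                                   # torch.Size([2, 100, 64])
    print(torch.allclose(offline, streamed, atol=1.0e-6))  # True

In the actual model, the encoded bottleneck is then handed to self.transformer.forward (or self.transformer.forward_chunk_by_chunk), exactly as in the diff above.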