Blane187 committed on
Commit
36da806
·
verified ·
1 Parent(s): 1c4c668

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -47
app.py CHANGED
@@ -66,58 +66,52 @@ with gr.Blocks(title="RVC UI") as app:
66
  with gr.Tabs():
67
  with gr.TabItem("Train"):
68
  gr.Markdown("### Step 1. Fill in the experimental configuration.\nExperimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.")
69
- with gr.Row():
70
- exp_dir1 = gr.Textbox(label="Enter the experiment name", value="my-voice")
71
  author = gr.Textbox(label="Model Author (Nullable)")
72
  #np7 = gr.Slider(minimum=0,maximum=config.n_cpu,step=1,label="Number of CPU processes used for pitch extraction and data processing",value=int(np.ceil(config.n_cpu / 1.5)),interactive=True)
73
- with gr.Row():
74
  sr2 = gr.Radio(label=("Target sample rate"),choices=["40k", "48k"],value="40k",interactive=True)
75
  if_f0_3 = gr.Radio(label="Whether the model has pitch guidance (required for singing, optional for speech)",choices=[("Yes"), ("No")],value=("Yes"),interactive=True)
76
  version19 = gr.Radio(label=("Version"),choices=["v1", "v2"],value="v2",interactive=True,visible=True)
77
- gr.Markdown("### Step 2. Audio processing. \n#### 1. Slicing.\nAutomatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.")
78
- with gr.Row():
79
- with gr.Column():
80
- trainset_dir4 = gr.Textbox(label="Enter the path of the training folder")
81
- spk_id5 = gr.Slider(minimum=0,maximum=4,step=1,label="Please specify the speaker/singer ID",value=0,interactive=True)
82
- but1 = gr.Button("Process data", variant="primary")
83
- with gr.Column():
84
- info1 = gr.Textbox(label="Output information", value="")
85
- #but1.click(preprocess_dataset,[trainset_dir4, exp_dir1, sr2, np7],[info1],api_name="train_preprocess")
86
- gr.Markdown("#### 2. Feature extraction.\nUse CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index).")
87
- with gr.Row():
88
- with gr.Column():
89
- #gpu_info9 = gr.Textbox(label="GPU Information",value=gpu_info,visible=F0GPUVisible)
90
- #gpus6 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2",value=gpus,interactive=True,visible=F0GPUVisible)
91
- #gpus_rmvpe = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1",value="%s-%s" % (gpus, gpus),interactive=True,visible=F0GPUVisible)
92
- f0method8 = gr.Radio(label="Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU",choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"],value="rmvpe_gpu",interactive=True)
93
- with gr.Column():
94
- but2 = gr.Button("Feature extraction", variant="primary")
95
- info2 = gr.Textbox(label="Output information", value="")
96
- #f0method8.change(fn=change_f0_method,inputs=[f0method8],outputs=[gpus_rmvpe])
97
- #but2.click(extract_f0_feature,[gpus6,np7,f0method8,if_f0_3,exp_dir1,version19,gpus_rmvpe,],[info2],api_name="train_extract_f0_feature")
98
- gr.Markdown("### Step 3. Start training.\nFill in the training settings and start training the model and index.")
99
- with gr.Row():
100
- with gr.Column():
101
- save_epoch10 = gr.Slider(minimum=1,maximum=50,step=1,label="Save frequency (save_every_epoch)",value=5,interactive=True)
102
- total_epoch11 = gr.Slider(minimum=2,maximum=1000,step=1,label="Total training epochs (total_epoch)",value=20,interactive=True)
103
- batch_size12 = gr.Slider(minimum=1,maximum=40,step=1,label="Batch size per GPU",value=default_batch_size,interactive=True)
104
- if_save_latest13 = gr.Radio(label="Save only the latest '.ckpt' file to save disk space",choices=["Yes", "No"],value="No",interactive=True)
105
- if_cache_gpu17 = gr.Radio(label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement",choices=["Yes", "No"],value="No",interactive=True)
106
- if_save_every_weights18 = gr.Radio(label="Save a small final model to the 'weights' folder at each save point",choices=["Yes","No"],value="No",interactive=True)
107
- with gr.Column():
108
- pretrained_G14 = gr.Textbox(label="Load pre-trained base model G path",value="assets/pretrained_v2/f0G40k.pth",interactive=True)
109
- pretrained_D15 = gr.Textbox(label="Load pre-trained base model D path",value="assets/pretrained_v2/f0D40k.pth",interactive=True)
110
- gpus16 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2",value=gpus,interactive=True)
111
- #sr2.change(change_sr2,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15])
112
- #version19.change(change_version19,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15, sr2])
113
- if_f0_3.change(change_f0,[if_f0_3, sr2, version19],[f0method8, gpus_rmvpe, pretrained_G14, pretrained_D15])
114
- but3 = gr.Button("Train model", variant="primary")
115
- but4 = gr.Button("Train feature index", variant="primary")
116
- but5 = gr.Button("One-click training", variant="primary")
117
- #info3 = gr.Textbox(label=i18n("Output information"), value="")
118
- #but3.click(click_train,[exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,author,],info3,api_name="train_start")
119
- #but4.click(train_index, [exp_dir1, version19], info3)
120
- #but5.click(train1key,[exp_dir1,sr2,if_f0_3,trainset_dir4,spk_id5,np7,f0method8,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,gpus_rmvpe,author],info3,api_name="train_start_all")
121
 
122
 
123
 
 
66
with gr.Tabs():
    with gr.TabItem("Train"):
        # Step 1: experiment configuration ------------------------------------
        gr.Markdown("### Step 1. Fill in the experimental configuration.\nExperimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.")
        # FIX: was a bare `Textbox(...)` (NameError) — must be `gr.Textbox`.
        exp_dir1 = gr.Textbox(label="Enter the experiment name", value="my-voice")
        author = gr.Textbox(label="Model Author (Nullable)")
        #np7 = gr.Slider(minimum=0,maximum=config.n_cpu,step=1,label="Number of CPU processes used for pitch extraction and data processing",value=int(np.ceil(config.n_cpu / 1.5)),interactive=True)
        sr2 = gr.Radio(label=("Target sample rate"), choices=["40k", "48k"], value="40k", interactive=True)
        if_f0_3 = gr.Radio(label="Whether the model has pitch guidance (required for singing, optional for speech)", choices=[("Yes"), ("No")], value=("Yes"), interactive=True)
        version19 = gr.Radio(label=("Version"), choices=["v1", "v2"], value="v2", interactive=True, visible=True)

        # Step 2: audio processing --------------------------------------------
        gr.Markdown("### Step 2. Audio processing. \n#### 1. Slicing.\nAutomatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.")
        trainset_dir4 = gr.Textbox(label="Enter the path of the training folder")
        spk_id5 = gr.Slider(minimum=0, maximum=4, step=1, label="Please specify the speaker/singer ID", value=0, interactive=True)
        but1 = gr.Button("Process data", variant="primary")
        info1 = gr.Textbox(label="Output information", value="")
        #but1.click(preprocess_dataset,[trainset_dir4, exp_dir1, sr2, np7],[info1],api_name="train_preprocess")

        # Step 2.2: feature extraction ----------------------------------------
        gr.Markdown("#### 2. Feature extraction.\nUse CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index).")
        #gpu_info9 = gr.Textbox(label="GPU Information",value=gpu_info,visible=F0GPUVisible)
        #gpus6 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2",value=gpus,interactive=True,visible=F0GPUVisible)
        # FIX: this component must exist — if_f0_3.change(...) below lists it as an
        # output, so leaving it commented out raised NameError. Kept hidden since
        # the GPU-visibility flag (F0GPUVisible) is no longer defined in this file.
        gpus_rmvpe = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-0-1 to use 2 processes in GPU0 and 1 process in GPU1", value="%s-%s" % (gpus, gpus), interactive=True, visible=False)
        f0method8 = gr.Radio(label="Select the pitch extraction algorithm: when extracting singing, you can use 'pm' to speed up. For high-quality speech with fast performance, but worse CPU usage, you can use 'dio'. 'harvest' results in better quality but is slower. 'rmvpe' has the best results and consumes less CPU/GPU", choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"], value="rmvpe_gpu", interactive=True)
        but2 = gr.Button("Feature extraction", variant="primary")
        info2 = gr.Textbox(label="Output information", value="")
        #f0method8.change(fn=change_f0_method,inputs=[f0method8],outputs=[gpus_rmvpe])
        #but2.click(extract_f0_feature,[gpus6,np7,f0method8,if_f0_3,exp_dir1,version19,gpus_rmvpe,],[info2],api_name="train_extract_f0_feature")

        # Step 3: training ----------------------------------------------------
        gr.Markdown("### Step 3. Start training.\nFill in the training settings and start training the model and index.")
        save_epoch10 = gr.Slider(minimum=1, maximum=50, step=1, label="Save frequency (save_every_epoch)", value=5, interactive=True)
        total_epoch11 = gr.Slider(minimum=2, maximum=1000, step=1, label="Total training epochs (total_epoch)", value=20, interactive=True)
        # default_batch_size is defined elsewhere in app.py (derived from GPU memory).
        batch_size12 = gr.Slider(minimum=1, maximum=40, step=1, label="Batch size per GPU", value=default_batch_size, interactive=True)
        if_save_latest13 = gr.Radio(label="Save only the latest '.ckpt' file to save disk space", choices=["Yes", "No"], value="No", interactive=True)
        if_cache_gpu17 = gr.Radio(label="Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement", choices=["Yes", "No"], value="No", interactive=True)
        if_save_every_weights18 = gr.Radio(label="Save a small final model to the 'weights' folder at each save point", choices=["Yes","No"], value="No", interactive=True)
        pretrained_G14 = gr.Textbox(label="Load pre-trained base model G path", value="assets/pretrained_v2/f0G40k.pth", interactive=True)
        pretrained_D15 = gr.Textbox(label="Load pre-trained base model D path", value="assets/pretrained_v2/f0D40k.pth", interactive=True)
        gpus16 = gr.Textbox(label="Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2", value=gpus, interactive=True)
        #sr2.change(change_sr2,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15])
        #version19.change(change_version19,[sr2, if_f0_3, version19],[pretrained_G14, pretrained_D15, sr2])
        # change_f0 updates the pitch-method choices and pretrained paths when the
        # pitch-guidance toggle flips; gpus_rmvpe is defined above so this resolves.
        if_f0_3.change(change_f0, [if_f0_3, sr2, version19], [f0method8, gpus_rmvpe, pretrained_G14, pretrained_D15])
        but3 = gr.Button("Train model", variant="primary")
        but4 = gr.Button("Train feature index", variant="primary")
        but5 = gr.Button("One-click training", variant="primary")
        #info3 = gr.Textbox(label=i18n("Output information"), value="")
        #but3.click(click_train,[exp_dir1,sr2,if_f0_3,spk_id5,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,author,],info3,api_name="train_start")
        #but4.click(train_index, [exp_dir1, version19], info3)
        #but5.click(train1key,[exp_dir1,sr2,if_f0_3,trainset_dir4,spk_id5,np7,f0method8,save_epoch10,total_epoch11,batch_size12,if_save_latest13,pretrained_G14,pretrained_D15,gpus16,if_cache_gpu17,if_save_every_weights18,version19,gpus_rmvpe,author],info3,api_name="train_start_all")
 
 
 
 
115
 
116
 
117