Blane187 committed on
Commit bee64dd · verified · 1 parent: 328b59e

Update app.py

Files changed (1): app.py (+6 -4)
app.py CHANGED
@@ -20,13 +20,15 @@ with gr.Blocks(title="RVC UI") as app:
     gr.Markdown("<center><h1> RVC UI 🗣️")
     gr.Markdown("<h1>this ui not done yet!")
     models = gr.Dropdown(label="voice model", choices=sorted(names))
+    with gr.Row():
+        pith_voice = gr.Number(label="Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12)",value=0,)
     clean_button = gr.Button("Unload model to save GPU memory", variant="primary")
     spk_item = gr.Slider(minimum=0,maximum=2333,step=1,label="Select Speaker/Singer ID",value=0,visible=False,interactive=False)
     clean_button.click(fn=clean, inputs=[], outputs=[models], api_name="infer_clean")
     modelinfo = gr.Textbox(label="Model info", max_lines=8, visible=False)
+    but0 = gr.Button("Convert", variant="primary")
     with gr.Tabs():
         with gr.TabItem("Single inference"):
-            pith_voice = gr.Number(label="Transpose (integer, number of semitones, raise by an octave: 12, lower by an octave: -12)",value=0,)
             input_audio0 = gr.Audio(label="The audio file to be processed",type="filepath")
             file_index1 = gr.File(label="Path to the feature index file. Leave blank to use the selected result from the dropdown")
             with gr.Column():
@@ -36,9 +38,9 @@ with gr.Blocks(title="RVC UI") as app:
                 protect0 = gr.Slider(minimum=0,maximum=0.5,label="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy",value=0.33,step=0.01,interactive=True)
                 filter_radius0 = gr.Slider(minimum=0,maximum=7,label=("If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness."),value=3,step=1,interactive=True)
                 f0_file = gr.File(label="F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation",visible=False)
-            but0 = gr.Button("Convert", variant="primary")
+
             vc_output1 = gr.Textbox(label="Output information", interactive=False)
-            vc_output2 = gr.Audio(label="Export audio (click on the three dots in the lower right corner to download)",type="filepath")
+            vc_output2 = gr.Audio(label="Export audio (click on the three dots in the lower right corner to download)",type="filepath",interactive=False)
             #refresh_button.click(fn=change_choices,inputs=[],outputs=[models, file_index2],api_name="infer_refresh")
         with gr.TabItem("Batch inference"):
             gr.Markdown(f"<center>Batch conversion\n. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').")
@@ -57,6 +59,6 @@ with gr.Blocks(title="RVC UI") as app:
             index_rate2 = gr.Slider(minimum=0,maximum=1,label="Feature searching ratio",value=1,interactive=True)
             format1 = gr.Radio(label="Export file format",choices=["wav", "flac", "mp3", "m4a"],value="wav",interactive=True)
             but1 = gr.Button("Convert", variant="primary")
-            vc_output3 = gr.Textbox(label="Output information")
+            vc_output3 = gr.Textbox(label="Output information",interactive=False)

 app.launch()
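
For context, here is a minimal, self-contained sketch of how the components touched by this commit (the relocated pith_voice and but0, and the outputs now marked interactive=False) are typically wired in a Gradio Blocks app. The convert callback below and its signature are placeholders for illustration only, not code from this repository; only the Button.click(fn=..., inputs=..., outputs=...) pattern, already used by clean_button above, is actual Gradio API.

# Illustrative sketch only (not part of this commit).
import gradio as gr

def convert(pitch, audio_path):
    # Placeholder callback: a real app would run voice-conversion inference here.
    return f"would convert {audio_path} transposed by {pitch} semitones", audio_path

with gr.Blocks(title="RVC UI sketch") as demo:
    with gr.Row():
        pith_voice = gr.Number(label="Transpose (semitones)", value=0)
    input_audio0 = gr.Audio(label="The audio file to be processed", type="filepath")
    but0 = gr.Button("Convert", variant="primary")
    vc_output1 = gr.Textbox(label="Output information", interactive=False)
    vc_output2 = gr.Audio(label="Export audio", type="filepath", interactive=False)
    # Clicking the button passes the listed inputs to `convert` and routes its
    # two return values to the two output components.
    but0.click(fn=convert, inputs=[pith_voice, input_audio0], outputs=[vc_output1, vc_output2])

demo.launch()

Setting interactive=False on the output components, as this commit does for vc_output2 and vc_output3, renders them as display-only fields that are filled by the callback rather than editable by the user.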