File size: 2,227 Bytes
a216f79
e99861a
a216f79
441e098
 
1b3b97a
c60eb53
4f9cc75
f773307
4f9cc75
3a1706f
1b3b97a
e87b62b
 
 
 
4f9cc75
 
 
9bbc445
b8e9a43
7a63ec6
1b3b97a
b8e9a43
1b3b97a
 
 
 
 
 
 
b8e9a43
 
1b3b97a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# --- Environment bootstrap (runs at import time, Hugging Face Spaces style) ---
# Dependencies and model checkpoints are installed/fetched via shell commands
# rather than requirements files.
import os
# Pin the exact (pre-release) Gradio version this demo was written against.
os.system('pip install gradio==2.3.0a0')
# Dump the installed package set — useful when debugging the hosted environment.
os.system('pip freeze')
import sys
# Make the repository root importable (bytesep/ is invoked from here below).
sys.path.append('.')
import gradio as gr
# NOTE(review): torchtext pin presumably matches the model code's requirements
# — confirm against the upstream repo before changing.
os.system('pip install -U torchtext==0.8.0')
#os.system('python setup.py install --install-dir .')

# Fetch the pretrained separation checkpoints used by inference() below.
os.system('./separate_scripts/download_checkpoints.sh')

def inference(audio):
    """Separate an uploaded audio file into vocals and accompaniment.

    Parameters
    ----------
    audio : file-like object with a ``.name`` attribute
        The Gradio ``type="file"`` audio input; only the path is used.

    Returns
    -------
    tuple[str, str]
        Paths of the written vocals and accompaniment MP3 files.
    """
    import shlex  # function-scope import: only needed to quote the user path

    # Quote the user-supplied path so filenames containing spaces or shell
    # metacharacters cannot break (or inject into) the command line.
    audio_path = shlex.quote(audio.name)
    config_yaml = './scripts/4_train/musdb18/configs/vocals-accompaniment,resunet_subbandtime.yaml'
    checkpoint = './downloaded_checkpoints/resunet143_subbtandtime_vocals_8.8dB_350k_steps.pth'

    # Vocals pass. (Bug fix: the original command contained a long run of
    # spaces after "--", which split the --checkpoint_path flag into a bare
    # "--" plus a positional argument and broke CLI parsing.)
    os.system(
        'python bytesep/inference.py'
        ' --config_yaml=' + config_yaml +
        ' --checkpoint_path=' + checkpoint +
        ' --audio_path=' + audio_path +
        ' --output_path=./sep_vocals.mp3'
    )
    # NOTE(review): this second pass reuses the *vocals* checkpoint, so
    # sep_accompaniment.mp3 is currently another vocals estimate. It
    # presumably should use the accompaniment checkpoint (the upstream repo
    # ships a separate_accompaniment.sh) — confirm the correct checkpoint
    # filename before changing it here.
    os.system(
        'python bytesep/inference.py'
        ' --config_yaml=' + config_yaml +
        ' --checkpoint_path=' + checkpoint +
        ' --audio_path=' + audio_path +
        ' --output_path=./sep_accompaniment.mp3'
    )
    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
# --- Static copy shown on the Gradio page ---
title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below. Currently supports .wav files"
# Links to the paper and the upstream repository, rendered below the demo.
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.05418'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"

# Clickable example input bundled alongside this script.
examples = [['example.wav']]
# Build the demo UI — one audio input, two audio outputs — and start serving.
demo = gr.Interface(
    fn=inference,
    inputs=gr.inputs.Audio(type="file", label="Input"),
    outputs=[
        gr.outputs.Audio(type="file", label="Vocals"),
        gr.outputs.Audio(type="file", label="Accompaniment"),
    ],
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
)
demo.launch(debug=True)