File size: 1,297 Bytes
441e098 1b3b97a c60eb53 3a1706f 1b3b97a 9bbc445 b8e9a43 9bbc445 1b3b97a b8e9a43 1b3b97a b8e9a43 1b3b97a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 |
import sys
# Make the repo root importable so local modules resolve when run as a script.
sys.path.append('.')
import gradio as gr
import os
# Runtime environment setup (Hugging Face Spaces style): install the pinned
# torchtext the separation scripts expect, then fetch model checkpoints.
# NOTE(review): return codes of both os.system calls are ignored — startup is
# best-effort; failures surface later when the separation scripts run.
os.system('pip install -U torchtext==0.8.0')
os.system('./separate_scripts/download_checkpoints.sh')
def inference(audio):
    """Split an uploaded audio file into vocals and accompaniment tracks.

    Shells out to the two separation scripts, which write their results to
    fixed output paths in the working directory.

    Args:
        audio: uploaded file object; only its ``.name`` attribute (the path
            of the temp file on disk) is used.

    Returns:
        tuple[str, str]: paths of the vocals and accompaniment MP3 files.
    """
    import shlex  # local import so this fix is self-contained

    # The uploaded filename is untrusted user input; quote it so it cannot
    # inject extra shell commands (the original concatenated it raw).
    path = shlex.quote(audio.name)
    # Best-effort, matching original behavior: exit codes are not checked.
    os.system('./separate_scripts/separate_vocals.sh ' + path + ' "sep_vocals.mp3"')
    os.system('./separate_scripts/separate_accompaniment.sh ' + path + ' "sep_accompaniment.mp3"')
    return 'sep_vocals.mp3', 'sep_accompaniment.mp3'
# Text shown on the demo page.
title = "Music Source Separation"
description = "Gradio demo for Music Source Separation. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below. Currently supports .wav files"
article = "<p style='text-align: center'><a href='https://github.com/bytedance/music_source_separation'>Decoupling Magnitude and Phase Estimation with Deep ResUNet for Music Source Separation</a> | <a href='https://github.com/bytedance/music_source_separation'>Github Repo</a></p>"
examples = [['example.wav']]

# One audio upload in; the two separated stems out.
demo_input = gr.inputs.Audio(type="file", label="Input")
demo_outputs = [
    gr.outputs.Audio(type="file", label="Vocals"),
    gr.outputs.Audio(type="file", label="Accompaniment"),
]

demo = gr.Interface(
    fn=inference,
    inputs=demo_input,
    outputs=demo_outputs,
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
)
demo.launch(debug=True)