Update app.py
app.py
CHANGED
@@ -1,47 +1,114 @@
 import os
-import time
-import json
 import gradio as gr
 import torch
 import torchaudio
 import numpy as np
 from denoiser.demucs import Demucs
 from pydub import AudioSegment
-
+import soundfile as sf
+import librosa
+
 modelpath = './denoiser/master64.th'
-
+
 def transcribe(file_upload, microphone):
     file = microphone if microphone is not None else file_upload
 
+    # Unified audio preprocessing pipeline
+    def preprocess_audio(input_path, output_path):
+        # Handle format conversion with pydub
+        audio = AudioSegment.from_file(input_path)
+
+        # Force mono + 16 kHz sample rate
+        if audio.channels > 1:
+            audio = audio.set_channels(1)
+        if audio.frame_rate != 16000:
+            audio = audio.set_frame_rate(16000)
+
+        # Export to a temporary WAV file
+        audio.export(output_path, format="wav")
+        return output_path
+
+    # Special handling for MP3 input
+    if file.lower().endswith(".mp3"):
+        temp_wav = "temp_input.wav"
+        preprocess_audio(file, temp_wav)
+        file = temp_wav
+
     # Load the model
     model = Demucs(hidden=64)
     state_dict = torch.load(modelpath, map_location='cpu')
     model.load_state_dict(state_dict)
-
+    model.eval()
+
     # Load the audio and force mono
-    x, sr = torchaudio.load(file)
-
-    # New: audio length check (inserted here)
-    MAX_AUDIO_SECONDS = 600  # 10-minute limit
-    if x.shape[1] / sr > MAX_AUDIO_SECONDS:
-        raise ValueError(f"音訊長度不可超過 {MAX_AUDIO_SECONDS} 秒,當前音訊長度:{x.shape[1]/sr:.1f} 秒")
-
-    # Mono conversion
+    x, sr = torchaudio.load(file)
     if x.shape[0] > 1:
         x = torch.mean(x, dim=0, keepdim=True)
 
+    # Audio length check
+    MAX_AUDIO_SECONDS = 600
+    if x.shape[1] / sr > MAX_AUDIO_SECONDS:
+        raise ValueError(f"音訊過長!限制:{MAX_AUDIO_SECONDS} 秒,當前:{x.shape[1]/sr:.1f} 秒")
+
     # Run denoising
-
-
+    with torch.no_grad():
+        out = model(x[None])[0]
+
     # Post-processing
     out = out / max(out.abs().max().item(), 1)
     torchaudio.save('enhanced.wav', out, sr)
 
-    #
-
-
+    # Convert the output to MP3
+    enhanced_mp3 = 'enhanced.mp3'
+    AudioSegment.from_wav('enhanced.wav').export(
+        enhanced_mp3,
+        format="mp3",
+        bitrate="256k"
+    )
+
+    # Clean up the temporary file
+    if os.path.exists("temp_input.wav"):
+        os.remove("temp_input.wav")
 
-    return
+    return enhanced_mp3
+
+# 👇 Important: work around Gradio type inference
+transcribe.__annotations__ = {
+    "file_upload": str,
+    "microphone": str,
+    "return": str
+}
+
+demo = gr.Interface(
+    fn=transcribe,
+    inputs=[
+        gr.Audio(type="filepath", label="上傳音訊檔案", sources=["upload", "microphone"])
+    ],
+    outputs=[
+        gr.Audio(type="filepath", label="處理後音訊")
+    ],
+    live=True,
+    allow_flagging="never",
+    title="<h1>語音質檢/噪音去除 (語音增強)</h1>",
+    description="""<h2><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D.</a> | <a href='https://blog.twman.org/p/deeplearning101.html' target='_blank'>手把手帶你一起踩AI坑</a><br></h2><br>
+為了提升語音識別的效果,可以在識別前先進行噪音去除<br>
+<a href='https://github.com/Deep-Learning-101' target='_blank'>Deep Learning 101 Github</a> | <a href='http://deeplearning101.twman.org' target='_blank'>Deep Learning 101</a> | <a href='https://www.facebook.com/groups/525579498272187/' target='_blank'>台灣人工智慧社團 FB</a> | <a href='https://www.youtube.com/c/DeepLearning101' target='_blank'>YouTube</a><br>
+<a href='https://blog.twman.org/2025/03/AIAgent.html' target='_blank'>那些 AI Agent 要踩的坑</a>:探討多種 AI 代理人工具的應用經驗與挑戰,分享實用經驗與工具推薦。<br>
+<a href='https://blog.twman.org/2024/08/LLM.html' target='_blank'>白話文手把手帶你科普 GenAI</a>:淺顯介紹生成式人工智慧核心概念,強調硬體資源和數據的重要性。<br>
+<a href='https://blog.twman.org/2024/09/LLM.html' target='_blank'>大型語言模型直接就打完收工?</a>:回顧 LLM 領域探索歷程,討論硬體升級對 AI 開發的重要性。<br>
+<a href='https://blog.twman.org/2024/07/RAG.html' target='_blank'>那些檢索增強生成要踩的坑</a>:探討 RAG 技術應用與挑戰,提供實用經驗分享和工具建議。<br>
+<a href='https://blog.twman.org/2024/02/LLM.html' target='_blank'>那些大型語言模型要踩的坑</a>:探討多種 LLM 工具的應用與挑戰,強調硬體資源的重要性。<br>
+<a href='https://blog.twman.org/2023/04/GPT.html' target='_blank'>Large Language Model,LLM</a>:探討 LLM 的發展與應用,強調硬體資源在開發中的關鍵作用。。<br>
+<a href='https://blog.twman.org/2024/11/diffusion.html' target='_blank'>ComfyUI + Stable Diffuision</a>:深入探討影像生成與分割技術的應用,強調硬體資源的重要性。<br>
+<a href='https://blog.twman.org/2024/02/asr-tts.html' target='_blank'>那些ASR和TTS可能會踩的坑</a>:探討 ASR 和 TTS 技術應用中的問題,強調數據質量的重要性。<br>
+<a href='https://blog.twman.org/2021/04/NLP.html' target='_blank'>那些自然語言處理 (Natural Language Processing, NLP) 踩的坑</a>:分享 NLP 領域的實踐經驗,強調數據質量對模型效果的影響。<br>
+<a href='https://blog.twman.org/2021/04/ASR.html' target='_blank'>那些語音處理 (Speech Processing) 踩的坑</a>:分享語音處理領域的實務經驗,強調資料品質對模型效果的影響。<br>
+<a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PPOCRLabel來幫PaddleOCR做OCR的微調和標註</a><br>
+<a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
+<a href='https://github.com/facebookresearch/denoiser' target='_blank'> Real Time Speech Enhancement in the Waveform Domain (Interspeech 2020)</a>""",
+)
+
+demo.launch(debug=True, share=True)
 
 # import os
 # import time
@@ -113,38 +180,3 @@ def transcribe(file_upload, microphone):
 # "microphone": str,
 # "return": str
 # }
-
-demo = gr.Interface(
-    fn=transcribe,
-    inputs=[
-        gr.Audio(type="filepath", label="語音質檢原始音檔", sources=["upload", "microphone"])  # explicitly specify sources
-    ],
-    outputs=[
-        gr.Audio(type="filepath", label="Output")  # keep the list form
-    ],
-    live=True,
-    allow_flagging="never",
-    title="<h1>語音質檢/噪音去除 (語音增強)</h1>",
-    description="""<h2><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D.</a> | <a href='https://blog.twman.org/p/deeplearning101.html' target='_blank'>手把手帶你一起踩AI坑</a><br></h2><br>
-為了提升語音識別的效果,可以在識別前先進行噪音去除<br>
-<a href='https://github.com/Deep-Learning-101' target='_blank'>Deep Learning 101 Github</a> | <a href='http://deeplearning101.twman.org' target='_blank'>Deep Learning 101</a> | <a href='https://www.facebook.com/groups/525579498272187/' target='_blank'>台灣人工智慧社團 FB</a> | <a href='https://www.youtube.com/c/DeepLearning101' target='_blank'>YouTube</a><br>
-<a href='https://blog.twman.org/2025/03/AIAgent.html' target='_blank'>那些 AI Agent 要踩的坑</a>:探討多種 AI 代理人工具的應用經驗與挑戰,分享實用經驗與工具推薦。<br>
-<a href='https://blog.twman.org/2024/08/LLM.html' target='_blank'>白話文手把手帶你科普 GenAI</a>:淺顯介紹生成式人工智慧核心概念,強調硬體資源和數據的重要性。<br>
-<a href='https://blog.twman.org/2024/09/LLM.html' target='_blank'>大型語言模型直接就打完收工?</a>:回顧 LLM 領域探索歷程,討論硬體升級對 AI 開發的重要性。<br>
-<a href='https://blog.twman.org/2024/07/RAG.html' target='_blank'>那些檢索增強生成要踩的坑</a>:探討 RAG 技術應用與挑戰,提供實用經驗分享和工具建議。<br>
-<a href='https://blog.twman.org/2024/02/LLM.html' target='_blank'>那些大型語言模型要踩的坑</a>:探討多種 LLM 工具的應用與挑戰,強調硬體資源的重要性。<br>
-<a href='https://blog.twman.org/2023/04/GPT.html' target='_blank'>Large Language Model,LLM</a>:探討 LLM 的發展與應用,強調硬體資源在開發中的關鍵作用。。<br>
-<a href='https://blog.twman.org/2024/11/diffusion.html' target='_blank'>ComfyUI + Stable Diffuision</a>:深入探討影像生成與分割技術的應用,強調硬體資源的重要性。<br>
-<a href='https://blog.twman.org/2024/02/asr-tts.html' target='_blank'>那些ASR和TTS可能會踩的坑</a>:探討 ASR 和 TTS 技術應用中的問題,強調數據質量的重要性。<br>
-<a href='https://blog.twman.org/2021/04/NLP.html' target='_blank'>那些自然語言處理 (Natural Language Processing, NLP) 踩的坑</a>:分享 NLP 領域的實踐經驗,強調數據質量對模型效果的影響。<br>
-<a href='https://blog.twman.org/2021/04/ASR.html' target='_blank'>那些語音處理 (Speech Processing) 踩的坑</a>:分享語音處理領域的實務經驗,強調資料品質對模型效果的影響。<br>
-<a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PPOCRLabel來幫PaddleOCR做OCR的微調和標註</a><br>
-<a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
-<a href='https://github.com/facebookresearch/denoiser' target='_blank'> Real Time Speech Enhancement in the Waveform Domain (Interspeech 2020)</a>""",
-    # examples=[
-    # ["exampleAudio/15s_2020-03-27_sep1.wav"],
-    # ["exampleAudio/13s_2020-03-27_sep2.wav"],
-    # ],
-)
-
-demo.launch(debug=True, share=True)
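For a quick local check of the enhancement path this commit settles on, outside the Gradio UI, here is a minimal sketch. It assumes the denoiser package is installed, the ./denoiser/master64.th checkpoint from this repo is in place, and "sample.wav" stands in for any short recording you supply yourself (the filename is only illustrative, not part of the commit).

# Minimal standalone sketch of the enhancement path added in this commit.
# Assumptions: denoiser is installed, ./denoiser/master64.th exists,
# and "sample.wav" is a placeholder for your own short recording.
import torch
import torchaudio
from denoiser.demucs import Demucs

model = Demucs(hidden=64)
model.load_state_dict(torch.load('./denoiser/master64.th', map_location='cpu'))
model.eval()                                   # switch norm/dropout layers to inference mode

x, sr = torchaudio.load('sample.wav')          # tensor shaped (channels, samples)
if x.shape[0] > 1:
    x = torch.mean(x, dim=0, keepdim=True)     # collapse to mono, as app.py does

with torch.no_grad():                          # no autograd bookkeeping during inference
    out = model(x[None])[0]                    # add a batch dimension, then drop it

out = out / max(out.abs().max().item(), 1)     # peak-normalize only if the output would clip
torchaudio.save('enhanced.wav', out, sr)

Pairing model.eval() with torch.no_grad(), as the updated app.py does, avoids building a gradient graph during inference and keeps memory use flat on longer clips.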