-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp2.py
More file actions
executable file
·765 lines (664 loc) · 35.4 KB
/
app2.py
File metadata and controls
executable file
·765 lines (664 loc) · 35.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
import os
import numpy as np
import torch
import gradio as gr
import spaces
import json
import tempfile
import shutil
import time
import subprocess
import logging
from typing import Optional, Tuple, Dict, Any
from funasr import AutoModel
from pathlib import Path

# Disable HuggingFace tokenizers parallelism (avoids fork-related warnings).
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Default to the official VoxCPM checkpoint when no repo is configured.
if os.environ.get("HF_REPO_ID", "").strip() == "":
    os.environ["HF_REPO_ID"] = "openbmb/VoxCPM-0.5B"

# Imported after the env vars above so the library picks them up on import.
import voxcpm
from ui_assets import GRADIO_JS, GRADIO_CSS
from voxcpm_demo import VoxCPMDemo
def create_demo_interface(demo: VoxCPMDemo):
    """Build the Gradio UI for the VoxCPM demo.

    Lays out the help accordions, the audio/video clip-extraction tool, the
    voice-preset and TTS generation controls, then wires the event callbacks.

    Args:
        demo: Backend facade providing TTS generation, ASR, voice presets and
            media utilities used by the UI callbacks.

    Returns:
        The assembled ``gr.Blocks`` interface.
    """
    # Static assets (logo path and frontend scripts).
    gr.set_static_paths(paths=[Path.cwd().absolute()/"assets", Path.cwd().absolute()/"static"])
    with gr.Blocks(
        js=GRADIO_JS,
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="gray",
            neutral_hue="slate",
            font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"]
        ),
        css=GRADIO_CSS
    ) as interface:
        # Header logo
        gr.HTML('<div class="logo-container"><img src="/gradio_api/file=assets/voxcpm_logo.png" alt="VoxCPM Logo"></div>')
        # Quick Start guide (collapsed by default).
        with gr.Accordion("📋 Quick Start Guide |快速入门", open=False, elem_id="acc_quick"):
            gr.Markdown("""
### How to Use |使用说明
1. **(Optional) Provide a Voice Prompt** - Upload or record an audio clip to provide the desired voice characteristics for synthesis.
**(可选)提供参考声音** - 上传或录制一段音频,为声音合成提供音色、语调和情感等个性化特征
2. **(Optional) Enter prompt text** - If you provided a voice prompt, enter the corresponding transcript here (auto-recognition available).
**(可选项)输入参考文本** - 如果提供了参考语音,请输入其对应的文本内容(支持自动识别)。
3. **Enter target text** - Type the text you want the model to speak.
**输入目标文本** - 输入您希望模型朗读的文字内容。
4. **Generate Speech** - Click the "Generate" button to create your audio.
**生成语音** - 点击"生成"按钮,即可为您创造出音频。
""")
        # Voice preset configuration guide (collapsed by default).
        with gr.Accordion("🎤 音色配置指南", open=False, elem_id="acc_presets"):
            gr.Markdown("""
### 音色配置功能说明
1. **使用预设音色** - 从下拉菜单中选择已配置的音色组合。
2. **配置文件格式** - 可以通过 `voice_presets.json` 文件自定义音色组合。
3. **示例配置** - 参考 `voice_presets.json.example` 文件了解配置格式。
### 配置文件结构
```json
{
"default_preset": "example", # 默认音色配置名称
"default_text": "默认目标文本", # 默认目标文本
"max_chars_per_segment": 200, # 单段最大字数限制
"segment_pause_duration": 0.5, # 段落间静音时长(秒)
"presets": {
"preset_name": {
"name": "显示名称",
"description": "描述信息",
"prompt_speech": "音频文件路径",
"prompt_text": "参考文本"
}
}
}
```
### 文本分段配置说明
- **max_chars_per_segment**: 单次生成允许的最大字数
- 中文字符:1个字符 = 1个字数
- 英文字符:2个字符 = 1个字数
- 数字:1个数字 = 1个字数
- 标点符号:不计入字数
- **segment_pause_duration**: 分段音频间的静音时长(秒)
- 超过字数限制时将智能拆分,保持句子完整性
""")
        # Pro Tips (collapsed by default).
        with gr.Accordion("💡 Pro Tips |使用建议", open=False, elem_id="acc_tips"):
            gr.Markdown("""
### Prompt Speech Enhancement|参考语音降噪
- **Enable** to remove background noise for a clean, studio-like voice, with an external ZipEnhancer component.
**启用**:通过 ZipEnhancer 组件消除背景噪音,获得更好的音质。
- **Disable** to preserve the original audio's background atmosphere.
**禁用**:保留原始音频的背景环境声,如果想复刻相应声学环境。
### Text Normalization|文本正则化
- **Enable** to process general text with an external WeTextProcessing component.
**启用**:使用 WeTextProcessing 组件,可处理常见文本。
- **Disable** to use VoxCPM's native text understanding ability. For example, it supports phonemes input ({HH AH0 L OW1}), try it!
**禁用**:将使用 VoxCPM 内置的文本理解能力。如,支持音素输入(如 {da4}{jia1}好)和公式符号合成,尝试一下!
### CFG Value|CFG 值
- **Lower CFG** if the voice prompt sounds strained or expressive.
**调低**:如果提示语音听起来不自然或过于夸张。
- **Higher CFG** for better adherence to the prompt speech style or input text.
**调高**:为更好地贴合提示音频的风格或输入文本。
### Inference Timesteps|推理时间步
- **Lower** for faster synthesis speed.
**调低**:合成速度更快。
- **Higher** for better synthesis quality.
**调高**:合成质量更佳。
""")
        # ---------- Audio/video clip extraction tool ----------
        with gr.Accordion("🎬 音频/视频片段截取工具", open=False, elem_id="acc_extractor"):
            gr.Markdown("""
### 功能说明
上传音频或视频文件,设置开始和结束时间,截取指定片段并生成音频文件。
截取的音频将保持原始采样率和声道数,输出为WAV格式。
### 使用步骤
1. **上传文件** - 支持常见的音频/视频格式
2. **播放预览** - 播放文件并记录想要截取的时间段
3. **设置时间** - 输入开始时间和结束时间(片段时长限制为5-40秒)
4. **截取音频** - 点击按钮生成音频片段文件
""")
            with gr.Row():
                with gr.Column():
                    # Source file upload.
                    extractor_file = gr.File(
                        label="上传音频/视频文件",
                        file_types=["audio", "video"],
                        type="filepath"
                    )
                    # Read-only file information display.
                    file_info = gr.Textbox(
                        label="文件信息",
                        interactive=False,
                        placeholder="请先上传文件..."
                    )
                    # Preview player (hidden until a file is uploaded).
                    extractor_player = gr.Audio(
                        label="文件预览",
                        visible=False,
                        interactive=True
                    )
                with gr.Column():
                    # Start/end time inputs.
                    with gr.Row():
                        with gr.Column(scale=3, min_width=120):
                            start_time = gr.Textbox(
                                label="开始时间",
                                value="0:00:00.000",
                                placeholder="0:10:22.150",
                                interactive=True,
                                elem_id="start_time"
                            )
                            use_current_start_btn = gr.Button(
                                "使用播放进度",
                                size="sm",
                                elem_id="use_current_start",
                                scale=0,
                                min_width=60
                            )
                        with gr.Column(scale=3, min_width=120):
                            end_time = gr.Textbox(
                                label="结束时间",
                                value="0:00:10.000",
                                placeholder="0:10:22.150",
                                interactive=True,
                                elem_id="end_time"
                            )
                            use_current_end_btn = gr.Button(
                                "使用播放进度",
                                size="sm",
                                elem_id="use_current_end",
                                scale=0,
                                min_width=60
                            )
                        with gr.Column(scale=1, min_width=50):
                            # Empty column used to fill the remaining space.
                            pass
                    # Validation message for the selected time range.
                    time_validation = gr.Textbox(
                        label="时间范围验证",
                        interactive=False,
                        placeholder="请设置开始和结束时间..."
                    )
                    # Action buttons.
                    with gr.Row():
                        extract_btn = gr.Button(
                            "截取音频片段",
                            variant="primary",
                            interactive=False
                        )
                        reset_time_btn = gr.Button(
                            "重置时间",
                            variant="secondary"
                        )
                    # Resulting clip (hidden until extraction succeeds).
                    extracted_audio = gr.Audio(
                        label="截取的音频片段",
                        visible=False,
                        interactive=False,
                        show_download_button=True
                    )
                    # Button to reuse the extracted clip as the prompt speech.
                    use_as_prompt_btn = gr.Button(
                        "用作 Prompt Speech",
                        variant="secondary",
                        visible=False,
                        elem_id="use_as_prompt_btn"
                    )
        # Main controls
        with gr.Row():
            with gr.Column():
                # Voice preset selector; choices are (display name, key) pairs.
                preset_options = [(preset.get("name", key) if isinstance(preset, dict) else key, key) for key, preset in demo.voice_presets.items()]
                with gr.Row():
                    preset = gr.Dropdown(
                        label="音色配置",
                        choices=preset_options,
                        value=demo.default_preset_name if demo.default_preset_name else None,
                        interactive=True,
                        info="选择预设的音色组合",
                        elem_id="preset"
                    )
                # Initialize prompt_wav with the default preset's audio file
                # (if configured), otherwise fall back to the bundled example.
                default_wav = None
                if demo.default_preset_name and demo.get_preset_by_name(demo.default_preset_name):
                    preset_audio = demo.get_preset_by_name(demo.default_preset_name).get("prompt_speech")
                    if preset_audio and os.path.exists(preset_audio):
                        default_wav = preset_audio
                elif os.path.exists("./examples/example.wav"):
                    default_wav = "./examples/example.wav"
                prompt_wav = gr.Audio(
                    sources=["upload", 'microphone'],
                    type="filepath",
                    label="Prompt Speech (Optional, or let VoxCPM improvise)",
                    value=default_wav,
                )
                DoDenoisePromptAudio = gr.Checkbox(
                    value=False,
                    label="Prompt Speech Enhancement",
                    elem_id="chk_denoise",
                    info="We use ZipEnhancer model to denoise the prompt audio."
                )
                # Denoised prompt-audio display component.
                denoised_audio_output = gr.Audio(
                    label="Denoised Audio",
                    visible=True,  # keep visible so the DOM node exists; CSS controls the initial hiding
                    interactive=False,
                    show_download_button=True,
                    elem_id="denoised_audio",
                    type="filepath"  # pass a file path rather than raw audio data
                )
                with gr.Row():
                    # Initialize prompt_text from the default preset (if any).
                    default_prompt_text = "Just by listening a few minutes a day, you'll be able to eliminate negative thoughts by conditioning your mind to be more positive."
                    if demo.default_preset_name and demo.get_preset_by_name(demo.default_preset_name):
                        default_prompt_text = demo.get_preset_by_name(demo.default_preset_name).get("prompt_text", default_prompt_text)
                    prompt_text = gr.Textbox(
                        value=default_prompt_text,
                        label="Prompt Text",
                        placeholder="Please enter the prompt text. Automatic recognition is supported, and you can correct the results yourself..."
                    )
                # Components carrying the user-entered preset name/description
                # from the JavaScript dialog (hidden via CSS, hence visible=True).
                preset_name_input = gr.Textbox(visible=True, elem_id="preset_name_hidden")
                preset_description_input = gr.Textbox(visible=True, elem_id="preset_description_hidden")
                save_trigger = gr.Button(visible=True, elem_id="save_trigger_hidden")
                # Hidden refresh-presets button, triggered automatically from JavaScript.
                refresh_presets_btn = gr.Button(visible=True, elem_id="refresh_presets_hidden")
            with gr.Column():
                cfg_value = gr.Slider(
                    minimum=1.0,
                    maximum=3.0,
                    value=2.0,
                    step=0.1,
                    label="CFG Value (Guidance Scale)",
                    info="Higher values increase adherence to prompt, lower values allow more creativity"
                )
                inference_timesteps = gr.Slider(
                    minimum=4,
                    maximum=30,
                    value=10,
                    step=1,
                    label="Inference Timesteps",
                    info="Number of inference timesteps for generation (higher values may improve quality but slower)"
                )
        with gr.Row():
            text = gr.Textbox(
                value=demo.default_text if demo.default_text else "VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly realistic speech.",
                label="Target Text",
            )
        with gr.Row():
            DoNormalizeText = gr.Checkbox(
                value=False,
                label="Text Normalization",
                elem_id="chk_normalize",
                info="We use wetext library to normalize the input text."
            )
        # Generate button — placed after all parameters, before the results.
        run_btn = gr.Button("Generate Speech", variant="primary", elem_classes=["generate-speech-mobile"])
        # Generation progress display.
        progress_display = gr.HTML(
            value="",
            label="生成进度",
            visible=False,
            elem_id="progress_display"
        )
        audio_output = gr.Audio(label="Output Audio")
        save_preset_btn = gr.Button("保存当前音色组合", variant="secondary", visible=True, elem_id="save_preset_btn")
        # Save-status message area.
        save_status = gr.Textbox(
            label="保存状态",
            interactive=False,
            visible=True,
            elem_id="save_status"
        )
# 定义音色配置选择事件
def load_preset(preset_name):
if not preset_name or preset_name not in demo.voice_presets:
return None, ""
preset = demo.voice_presets[preset_name]
# 设置预设切换标志,避免语音识别覆盖预设文本
preset_switching["value"] = True
# 直接返回预设配置,不再使用thread_local
return preset.get("prompt_speech", None), preset.get("prompt_text", "")
# 绑定音色配置选择事件
preset.change(
fn=load_preset,
inputs=[preset],
outputs=[prompt_wav, prompt_text]
)
# 用于跟踪预设切换状态的变量
preset_switching = {"value": False}
        # Function that refreshes the preset dropdown from disk.
        def refresh_preset_list():
            """Reload voice presets from the JSON config and rebuild the dropdown."""
            demo.load_voice_presets()  # re-read the latest config from the JSON file
            updated_preset_options = [(preset.get("name", key) if isinstance(preset, dict) else key, key) for key, preset in demo.voice_presets.items()]
            # Keep the currently selected value; clear it if it no longer exists.
            # NOTE(review): gr.Dropdown.value is the component's *initial* value,
            # not the user's live selection — confirm whether the live selection
            # should instead be passed in as an event input.
            current_value = preset.value if preset.value in [option[1] for option in updated_preset_options] else None
            return gr.update(choices=updated_preset_options, value=current_value)
# 语音识别方法(改进版本)
def custom_prompt_wav_recognition(prompt_wav, current_prompt_text):
# 如果正在进行预设切换,不执行语音识别
if preset_switching["value"]:
preset_switching["value"] = False # 重置标志
return current_prompt_text # 保持当前文本不变
# 正常的语音识别逻辑
if prompt_wav is None:
return ""
res = demo.asr_model.generate(input=prompt_wav, language="auto", use_itn=True)
text = res[0]["text"].split('|>')[-1]
return text
# 包装函数来处理TTS生成和降噪音频显示
def generate_and_handle_denoised_audio(*args, progress=gr.Progress()):
"""
包装generate_tts_audio方法,处理返回的音频和降噪音频路径
"""
# 清理之前任务生成的临时文件
demo.cleanup_temp_files()
# 定义进度回调函数
def progress_callback(current_segment, total_segments, processed_chars, total_chars, current_segment_chars, status):
# 计算总体进度百分比
segment_progress = current_segment / total_segments
char_progress = processed_chars / total_chars if total_chars > 0 else 0
overall_progress = (segment_progress + char_progress) / 2
if status == "generating":
desc = f"生成第 {current_segment}/{total_segments} 段 ({current_segment_chars} 字),总进度 {processed_chars}/{total_chars}"
progress_html = f"""
<div style="font-family: monospace; background: #f0f0f0; padding: 10px; border-radius: 5px;">
<div>📊 当前进度:子任务 {current_segment}/{total_segments},处理字数 {processed_chars}/{total_chars}</div>
<div>🎵 子任务进度:正在生成第 {current_segment} 段({current_segment_chars} 字)...</div>
</div>
"""
elif status == "completed":
desc = f"第 {current_segment}/{total_segments} 段完成,总进度 {processed_chars}/{total_chars}"
progress_html = f"""
<div style="font-family: monospace; background: #f0f0f0; padding: 10px; border-radius: 5px;">
<div>📊 当前进度:子任务 {current_segment}/{total_segments},处理字数 {processed_chars}/{total_chars}</div>
<div>✅ 子任务进度:第 {current_segment} 段生成完成</div>
</div>
"""
elif status == "merging":
desc = f"正在合并音频段落,总进度 {processed_chars}/{total_chars}"
progress_html = f"""
<div style="font-family: monospace; background: #f0f0f0; padding: 10px; border-radius: 5px;">
<div>📊 当前进度:子任务 {current_segment}/{total_segments},处理字数 {processed_chars}/{total_chars}</div>
<div>🔗 子任务进度:正在合并音频段落...</div>
</div>
"""
overall_progress = 0.95
elif status == "finished":
desc = f"生成完成!总进度 {processed_chars}/{total_chars}"
progress_html = f"""
<div style="font-family: monospace; background: #e8f5e8; padding: 10px; border-radius: 5px;">
<div>📊 当前进度:子任务 {current_segment}/{total_segments},处理字数 {processed_chars}/{total_chars}</div>
<div>🎉 子任务进度:生成完成!</div>
</div>
"""
overall_progress = 1.0
# 使用Gradio的进度更新
progress(overall_progress, desc=desc)
# 打印到控制台
print(f"进度更新: {desc}")
result, denoised_path = demo.generate_tts_audio(*args, progress_callback=progress_callback)
# 控制降噪音频组件的可见性
if denoised_path:
return result, gr.update(value=denoised_path, visible=True), gr.update(value="", visible=False)
else:
return result, gr.update(visible=False), gr.update(value="", visible=False)
# 控制降噪音频组件可见性的函数
def toggle_denoised_audio_visibility(denoise_enabled):
"""
根据降噪是否启用来控制降噪音频组件的可见性
"""
if denoise_enabled:
return gr.update(visible=True)
else:
return gr.update(visible=False)
# Wiring
run_btn.click(
fn=generate_and_handle_denoised_audio,
inputs=[text, prompt_wav, prompt_text, cfg_value, inference_timesteps, DoNormalizeText, DoDenoisePromptAudio],
outputs=[audio_output, denoised_audio_output, progress_display],
show_progress=True,
api_name="generate",
).then(
fn=None,
js="() => { if (typeof showSavePresetButton === 'function') { showSavePresetButton(); } }"
)
# 使用自定义的语音识别函数,传入当前prompt_text值
prompt_wav.change(fn=custom_prompt_wav_recognition, inputs=[prompt_wav, prompt_text], outputs=[prompt_text])
        # ---------- Clip-extraction tool event handlers ----------
        def handle_file_upload(file_path):
            """Handle an upload to the clip-extraction tool.

            Returns updates for (file_info, extractor_player, extract_btn).
            """
            if not file_path:
                # Nothing uploaded: clear info, hide player, disable extraction.
                return gr.update(value=""), gr.update(visible=False), gr.update(interactive=False)
            # Probe the media file (duration, streams, codec, ...).
            media_info = demo.get_media_info(file_path)
            if not media_info:
                return gr.update(value="文件格式不支持或文件损坏"), gr.update(visible=False), gr.update(interactive=False)
            # Format a human-readable summary of the file.
            duration_str = demo.format_duration(media_info['duration'])
            file_type = "视频文件" if media_info['has_video'] else "音频文件"
            file_info_text = (
                f"文件名: {os.path.basename(file_path)}\n"
                f"类型: {file_type}\n"
                f"时长: {duration_str}\n"
                f"格式: {media_info['format_name']}\n"
                f"采样率: {media_info['sample_rate']}Hz\n"
                f"声道数: {media_info['channels']}\n"
                f"编码: {media_info['codec_name']}\n"
                f"文件大小: {media_info['file_size'] / 1024 / 1024:.1f}MB"
            )
            # Choose the audio source for the preview player.
            preview_audio_path = file_path
            # For video files, extract the audio track for preview.
            if media_info['has_video']:
                print(f"🎬 检测到视频文件,正在提取音频用于预览...")
                extracted_audio = demo.extract_audio_from_video(file_path)
                if extracted_audio:
                    preview_audio_path = extracted_audio
                else:
                    print(f"❌ 视频音频提取失败,使用原文件")
                    # Even if extraction failed, still show the player so the
                    # user knows the file was uploaded.
                    pass
            # Show the player and enable the extract button.
            return (
                gr.update(value=file_info_text),
                gr.update(value=preview_audio_path, visible=True),
                gr.update(interactive=True)
            )
def validate_time_inputs(start_str, end_str, file_path):
"""验证时间输入"""
if not file_path:
return gr.update(value="请先上传文件")
media_info = demo.get_media_info(file_path)
if not media_info:
return gr.update(value="无法获取文件信息")
# 转换时间格式为秒数
start_seconds = demo.ffmpeg_time_to_seconds(start_str)
end_seconds = demo.ffmpeg_time_to_seconds(end_str)
is_valid, message = demo.validate_time_range(start_seconds, end_seconds, media_info['duration'])
return gr.update(value=message)
def extract_audio_clip(file_path, start_str, end_str):
"""截取音频片段"""
# 清理之前任务生成的临时文件
demo.cleanup_temp_files()
if not file_path:
return gr.update(visible=False), gr.update(value="请先上传文件")
media_info = demo.get_media_info(file_path)
if not media_info:
return gr.update(visible=False), gr.update(value="无法获取文件信息")
# 转换时间格式为秒数
start_seconds = demo.ffmpeg_time_to_seconds(start_str)
end_seconds = demo.ffmpeg_time_to_seconds(end_str)
is_valid, message = demo.validate_time_range(start_seconds, end_seconds, media_info['duration'])
if not is_valid:
return gr.update(visible=False), gr.update(value=f"验证失败: {message}")
# 执行截取
output_path = demo.extract_audio_segment(file_path, start_seconds, end_seconds)
if output_path:
return (
gr.update(value=output_path, visible=True),
gr.update(value=f"✅ 截取成功: {message}"),
gr.update(visible=True) # 显示"用作prompt speech"按钮
)
else:
return (
gr.update(visible=False),
gr.update(value="❌ 截取失败,请检查文件格式或ffmpeg安装"),
gr.update(visible=False) # 隐藏"用作prompt speech"按钮
)
def reset_time_settings():
"""重置时间设置"""
return gr.update(value="0:00:00.000"), gr.update(value="0:00:10.000"), gr.update(value="已重置时间设置")
def use_extracted_as_prompt(extracted_audio_path):
"""将截取的音频设置为prompt speech并识别文本"""
if not extracted_audio_path:
return gr.update(), gr.update()
# 设置audio为提取的音频文件路径
prompt_audio_update = gr.update(value=extracted_audio_path)
# 识别音频中的文本
try:
res = demo.asr_model.generate(input=extracted_audio_path, language="auto", use_itn=True)
recognized_text = res[0]["text"].split('|>')[-1]
prompt_text_update = gr.update(value=recognized_text)
print(f"✅ 音频识别完成: {recognized_text}")
except Exception as e:
print(f"❌ 音频识别失败: {e}")
prompt_text_update = gr.update()
return prompt_audio_update, prompt_text_update
        def do_save_preset(preset_name, description, prompt_wav, prompt_text, denoised_audio_value, denoise_enabled):
            """Persist the current voice combination as a named preset.

            Picks the best available audio file (denoised clip preferred when
            denoising is enabled), saves it via demo.save_voice_preset, and
            returns updates for (save_status, preset dropdown).
            """
            if not preset_name or not preset_name.strip():
                return gr.update(value="❌ 配置名称不能为空", visible=True), gr.update()
            preset_name = preset_name.strip()
            # Default description carries a timestamp so auto-saves stay distinguishable.
            description = description.strip() if description else f"自动保存的音色配置 - {time.strftime('%Y-%m-%d %H:%M:%S')}"
            # Choose the best audio file to store with the preset.
            audio_path = None
            audio_source = "未知"
            # Prefer the denoised audio when denoising is enabled and usable.
            if denoise_enabled and denoised_audio_value:
                if isinstance(denoised_audio_value, str) and denoised_audio_value.strip():
                    if os.path.exists(denoised_audio_value):
                        audio_path = denoised_audio_value
                        audio_source = "降噪音频"
                        print(f" - ✅ 使用降噪音频: {audio_path}")
                    else:
                        print(f" - ⚠️ 降噪音频文件不存在: {denoised_audio_value}")
                else:
                    print(f" - ⚠️ 降噪音频值无效: {denoised_audio_value}")
            # Fall back to the original prompt audio when no denoised file is usable.
            if not audio_path:
                if prompt_wav and isinstance(prompt_wav, str) and os.path.exists(prompt_wav):
                    audio_path = prompt_wav
                    audio_source = "原始音频"
                else:
                    print(f" - ❌ 原始音频也不可用: {prompt_wav}")
            # Validate the final choice before saving.
            if not audio_path or not isinstance(audio_path, str):
                return gr.update(value="⚠️ 没有可保存的音频文件", visible=True), gr.update()
            if not os.path.exists(audio_path):
                return gr.update(value=f"⚠️ 音频文件不存在: {audio_path}", visible=True), gr.update()
            # Persist the preset.
            success = demo.save_voice_preset(preset_name, audio_path, prompt_text or "", description)
            if success:
                # Reload presets and refresh the dropdown, selecting the new entry.
                demo.load_voice_presets()
                updated_preset_options = [(preset.get("name", key) if isinstance(preset, dict) else key, key) for key, preset in demo.voice_presets.items()]
                # Mention which audio source was stored in the success message.
                success_message = f"✅ 音色配置已保存: {preset_name} (使用{audio_source})"
                return (
                    gr.update(value=success_message, visible=True),
                    gr.update(choices=updated_preset_options, value=preset_name)
                )
            else:
                return gr.update(value="❌ 保存音色配置失败", visible=True), gr.update()
        # Clip-extraction tool event bindings.
        extractor_file.change(
            fn=handle_file_upload,
            inputs=[extractor_file],
            outputs=[file_info, extractor_player, extract_btn]
        )
        start_time.change(
            fn=validate_time_inputs,
            inputs=[start_time, end_time, extractor_file],
            outputs=[time_validation]
        )
        end_time.change(
            fn=validate_time_inputs,
            inputs=[start_time, end_time, extractor_file],
            outputs=[time_validation]
        )
        extract_btn.click(
            fn=extract_audio_clip,
            inputs=[extractor_file, start_time, end_time],
            outputs=[extracted_audio, time_validation, use_as_prompt_btn],
            show_progress=True
        )
        reset_time_btn.click(
            fn=reset_time_settings,
            outputs=[start_time, end_time, time_validation]
        )
        # Bind the "use as prompt speech" button.
        use_as_prompt_btn.click(
            fn=use_extracted_as_prompt,
            inputs=[extracted_audio],
            outputs=[prompt_wav, prompt_text],
            show_progress=True
        )
        # Note: the save button's click is handled by JavaScript, not bound in Python.
        # Hidden refresh-presets trigger (clicked programmatically from JS).
        refresh_presets_btn.click(
            fn=refresh_preset_list,
            outputs=[preset],
            show_progress=False
        )
        # Hidden trigger that actually performs the preset save.
        save_trigger.click(
            fn=do_save_preset,
            inputs=[preset_name_input, preset_description_input, prompt_wav, prompt_text, denoised_audio_output, DoDenoisePromptAudio],
            outputs=[save_status, preset],
            show_progress=True
        )
        return interface
def run_demo(server_name: str = "localhost", server_port: int = 7860, show_error: bool = True):
    """Create the VoxCPM demo and launch the Gradio server.

    Args:
        server_name: Host/interface to bind.
        server_port: TCP port to listen on.
        show_error: Whether Gradio should surface errors in the UI.
    """
    demo = VoxCPMDemo()
    interface = create_demo_interface(demo)
    # Ensure the presets directory exists up front — it doubles as the default
    # allowed path and stores saved voice presets. (Previously this was
    # computed and created twice; consolidated here.)
    presets_dir = os.path.join(os.getcwd(), "presets")
    os.makedirs(presets_dir, exist_ok=True)
    # Paths the Gradio server is allowed to serve user files from.
    allowed_paths = demo.get_allowed_paths()
    if not allowed_paths:
        # Fall back to the presets directory when the config provides nothing.
        allowed_paths = [presets_dir]
        print(f"⚠️ 配置中未找到allowed_paths,使用默认路径: {presets_dir}")
    # Also expose the current working directory (covers the denoised_audio,
    # extracted_audio, temp_preview_audio and presets subdirectories).
    current_dir = os.getcwd()
    if current_dir not in allowed_paths:
        allowed_paths.append(current_dir)
        print(f"✅ 已添加当前工作目录到允许路径: {current_dir}")
    # Queueing is recommended (e.g. on Spaces) for better throughput.
    interface.queue(max_size=10).launch(
        server_name=server_name,
        server_port=server_port,
        show_error=show_error,
        allowed_paths=allowed_paths
    )
if __name__ == "__main__":
    # Bind to "0.0.0.0" so the demo is reachable on the LAN; public exposure
    # is expected to be blocked by the host firewall.
    run_demo(server_name="0.0.0.0")