long_audio.py
import glob
import os
from pydub import AudioSegment
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
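
# ModelScope ASR pipeline: Paraformer-large for recognition, an FSMN model
# for voice activity detection, and a CT-Transformer model for punctuation
# restoration (all Chinese, 16 kHz).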
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
    vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
    punc_model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
)
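
# recog() runs the pipeline on a single file. clip() relies on the result's
# 'sentences' list, where each entry carries the recognized 'text' and a
# 'ts_list' of [start_ms, end_ms] timestamps.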
def recog(audio_path):
    rec_result = inference_pipeline(audio_in=audio_path)
    return rec_result
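
# clip() slices one long recording into sentence-aligned segments of at least
# min_time ms (the final segment may be shorter), writes them under output_dir
# while mirroring the input directory layout, and returns annotation lines in
# the form "path|speaker|ZH|text". Recordings no longer than max_time ms are
# exported as a single clip with the full transcript.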
def clip(audio_path, rec_result, input_base_path, output_dir="datasets", min_time=3000, max_time=15000):
    # Mirror the input directory layout under output_dir.
    rel_path = os.path.relpath(audio_path, start=input_base_path)
    output_path = os.path.join(output_dir, rel_path)
    output_dir, file_name_with_extension = os.path.split(output_path)
    file_name, file_extension = os.path.splitext(file_name_with_extension)
    os.makedirs(output_dir, exist_ok=True)
    # The immediate parent directory of the audio file names the speaker.
    speaker_name = os.path.basename(os.path.dirname(audio_path))
    audio = AudioSegment.from_file(audio_path)
    sentences = rec_result['sentences']
    start_time = 0
    last_end_time = 0
    text = ""
    continue_clip = False
    i = 0
    sentences_num = len(sentences)
    lines = []
    # Short recordings are exported as a single clip with the full transcript.
    if len(audio) <= max_time:
        output_file = os.path.join(output_dir, f"{file_name}{file_extension}")
        audio.export(output_file, format="wav")
        for sentence in sentences:
            text += sentence['text']
        lines.append(f"{output_file}|{speaker_name}|ZH|{text}\n")
        return lines
    # Otherwise accumulate sentences until a segment reaches min_time ms.
    for num, sentence in enumerate(sentences):
        if not continue_clip:
            start_time = sentence['ts_list'][0][0]
        current_start_time = sentence['ts_list'][0][0]
        current_end_time = sentence['ts_list'][-1][1]
        text += sentence['text']
        segment_time = current_end_time - start_time
        if segment_time > min_time or num == sentences_num - 1:
            if num == sentences_num - 1:
                # Last sentence: slice to the end of the audio
                # (pydub interprets -1 relative to the end).
                current_end_time = -1
            if current_start_time - last_end_time >= 1000:
                # Replace a pause of 1 s or more with exactly 1 s of silence.
                silence_segment = AudioSegment.silent(duration=1000)
                sliced_audio = audio[start_time:last_end_time] + silence_segment + audio[
                    current_start_time:current_end_time]
            else:
                sliced_audio = audio[start_time:current_end_time]
            # Export as WAV (the file name keeps the original extension).
            output_file = os.path.join(output_dir, f"{file_name}_{i}{file_extension}")
            sliced_audio.export(output_file, format="wav")
            lines.append(f"{output_file}|{speaker_name}|ZH|{text}\n")
            text = ""
            continue_clip = False
            i += 1
        else:
            # Segment still too short: remember where this sentence ended
            # and keep accumulating.
            last_end_time = current_end_time
            continue_clip = True
    return lines
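
# clip_all() walks input_path recursively, transcribes and clips every audio
# file it finds, and writes the combined annotation lines to all.txt in the
# current working directory.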
def clip_all(input_path, output_dir="datasets", min_time=3000, max_time=15000):
    audio_files = glob.glob(os.path.join(input_path, "**", "*.*"), recursive=True)
    lines = []
    for audio_path in audio_files:
        rec_result = recog(audio_path)
        lines.extend(clip(audio_path, rec_result, input_base_path=input_path, output_dir=output_dir,
                          min_time=min_time, max_time=max_time))
    save_path = "all.txt"
    with open(save_path, "w", encoding='utf8') as f:
        f.writelines(lines)
    return save_path
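
# Entry point: transcribe and clip everything under cache/; clips mirror the
# directory layout under datasets/, with speaker names taken from each file's
# immediate parent directory.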
if __name__ == '__main__':
    clip_all('cache', output_dir="datasets")