[logger/test] Add debug-level logging of the received data length, to test speaker identification performance.

Ziyang.Zhang 2025-06-25 16:56:10 +08:00
parent fc1dac7927
commit d5b9953905
5 changed files with 33 additions and 0 deletions


@@ -112,6 +112,9 @@ class ASRFunctor(BaseFunctor):
        while self._is_running:
            try:
                data = self._input_queue.get(True, timeout=1)
+               if data is None:
+                   break
+               logger.debug("[ASRFunctor] received data length: %s", len(data))
                self._process(data)
                self._input_queue.task_done()
            # When the queue is empty, check every 1 s whether the stop event has been triggered.


@@ -98,6 +98,9 @@ class SPKFunctor(BaseFunctor):
        while self._is_running:
            try:
                data = self._input_queue.get(True, timeout=1)
+               if data is None:
+                   break
+               logger.debug("[SPKFunctor] received data length: %s", len(data))
                self._process(data)
                self._input_queue.task_done()
            # When the queue is empty, check every 1 s whether the stop event has been triggered.


@@ -202,6 +202,9 @@ class VADFunctor(BaseFunctor):
        while self._is_running:
            try:
                data = self._input_queue.get(True, timeout=1)
+               if data is None:
+                   break
+               logger.debug("[VADFunctor] received data length: %s", len(data))
                self._process(data)
                self._input_queue.task_done()
            # When the queue is empty, check every 1 s whether the stop event has been triggered.


@@ -224,6 +224,7 @@ class ASRPipeline(PipelineBase):
        while self._is_running and not self._stop_event:
            try:
                data = self._input_queue.get(timeout=self._queue_timeout)
+               logger.debug("[ASRpipeline] received data length: %s", len(data) if data is not None else "stop signal")
                # Check whether this is the stop signal
                if data is None:
                    logger.info("Received stop signal, pipeline preparing to stop")

tests/spkverify_use.py (new file, 23 lines)

@@ -0,0 +1,23 @@
from modelscope.pipelines import pipeline
sv_pipeline = pipeline(
    task='speaker-verification',
    model='iic/speech_campplus_sv_zh-cn_16k-common',
    model_revision='v1.0.0'
)
speaker1_a_wav = 'https://modelscope.cn/api/v1/models/damo/speech_campplus_sv_zh-cn_16k-common/repo?Revision=master&FilePath=examples/speaker1_a_cn_16k.wav'
speaker1_b_wav = 'https://modelscope.cn/api/v1/models/damo/speech_campplus_sv_zh-cn_16k-common/repo?Revision=master&FilePath=examples/speaker1_b_cn_16k.wav'
speaker2_a_wav = 'https://modelscope.cn/api/v1/models/damo/speech_campplus_sv_zh-cn_16k-common/repo?Revision=master&FilePath=examples/speaker2_a_cn_16k.wav'
# Utterances from the same speaker
result = sv_pipeline([speaker1_a_wav, speaker1_b_wav])
print(result)
# Utterances from different speakers
result = sv_pipeline([speaker1_a_wav, speaker2_a_wav])
print(result)
# A custom score threshold can be passed; the higher the threshold, the stricter the criterion for judging two utterances to be the same speaker
result = sv_pipeline([speaker1_a_wav, speaker1_a_wav], thr=0.6)
print(result)
# With output_emb=True the result also contains the extracted speaker embeddings
result = sv_pipeline([speaker1_a_wav, speaker2_a_wav], output_emb=True)
print(result['embs'], result['outputs'])
# With save_dir the extracted speaker embeddings are stored in that directory
result = sv_pipeline([speaker1_a_wav, speaker2_a_wav], save_dir='savePath/')
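Assuming result['embs'] holds one embedding vector per input wav (as the output_emb example above suggests), cached embeddings can also be compared directly, which is how registered speakers would typically be matched without re-running the pipeline on both recordings. The snippet below continues the script above; cosine_score is a hypothetical helper, not part of the repository or of the ModelScope API.

import numpy as np

def cosine_score(emb_a, emb_b) -> float:
    # Cosine similarity between two speaker embeddings (higher = more likely the same speaker).
    a = np.asarray(emb_a, dtype=np.float32).ravel()
    b = np.asarray(emb_b, dtype=np.float32).ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Assumption: res['embs'] is indexable as one embedding per input wav.
res = sv_pipeline([speaker1_a_wav, speaker2_a_wav], output_emb=True)
print('cosine similarity:', cosine_score(res['embs'][0], res['embs'][1]))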