志浩 3 года назад
Родитель
Commit
31dda90f2d

+ 11 - 5
egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer.py

@@ -9,17 +9,23 @@ if __name__ == '__main__':
     )
 
     # 提取不同句子的说话人嵌入码
+    # for url use "utt_id" as key
     rec_result = inference_sv_pipline(
         audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_enroll.wav')
-    enroll = rec_result["spk_embedding"]
+    enroll = rec_result["utt_id"]
 
-    rec_result = inference_sv_pipline(
-        audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_same.wav')
-    same = rec_result["spk_embedding"]
+    # for local file use "utt_id" as key
+    rec_result = inference_sv_pipline(audio_in='sv_example_same.wav')["test1"]
+    same = rec_result["test1"]
+
+    import soundfile
+    wav = soundfile.read('sv_example_enroll.wav')[0]
+    # for raw inputs use "utt_id" as key
+    spk_embedding = inference_sv_pipline(audio_in=wav)["utt_id"]
 
     rec_result = inference_sv_pipline(
         audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_different.wav')
-    different = rec_result["spk_embedding"]
+    different = rec_result["utt_id"]
 
     # 对相同的说话人计算余弦相似度
     sv_threshold = 0.9465