ws_server_2pass.py
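# WebSocket server for FunASR-style "2-pass" speech recognition: a streaming ASR
# model produces low-latency partial results ("2pass-online") while an offline ASR
# model, gated by VAD and optionally followed by punctuation restoration, sends a
# refined result ("2pass-offline") at the end of each speech segment.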

import asyncio
import json
import websockets
import time
import logging
import tracemalloc
import numpy as np
from parse_args import args
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes

tracemalloc.start()

logger = get_logger(log_level=logging.CRITICAL)
logger.setLevel(logging.CRITICAL)

websocket_users = set()

print("model loading")
# asr
inference_pipeline_asr = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision=None)

# vad
inference_pipeline_vad = pipeline(
    task=Tasks.voice_activity_detection,
    model=args.vad_model,
    model_revision=None,
    output_dir=None,
    batch_size=1,
    mode='online',
    ngpu=args.ngpu,
    ncpu=args.ncpu,
)

if args.punc_model != "":
    inference_pipeline_punc = pipeline(
        task=Tasks.punctuation,
        model=args.punc_model,
        model_revision=None,
        ngpu=args.ngpu,
        ncpu=args.ncpu,
    )
else:
    inference_pipeline_punc = None

inference_pipeline_asr_online = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model_online,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision='v1.0.4')
print("model loaded")


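# Per-connection handler. Each client first sends a JSON config string
# (is_speaking / chunk_interval / wav_name / chunk_size) and then streams raw
# 16 kHz, 16-bit mono PCM audio as binary messages; the len(message) // 32
# below converts each frame's byte length to milliseconds under that format.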
async def ws_serve(websocket, path):
    frames = []
    frames_asr = []
    frames_asr_online = []
    global websocket_users
    websocket_users.add(websocket)
    websocket.param_dict_asr = {}
    websocket.param_dict_asr_online = {"cache": dict()}
    websocket.param_dict_vad = {'in_cache': dict(), "is_final": False}
    websocket.param_dict_punc = {'cache': list()}
    websocket.vad_pre_idx = 0
    speech_start = False
    # fallbacks in case audio arrives before the client's config message
    websocket.is_speaking = True
    websocket.chunk_interval = 10
    websocket.wav_name = "microphone"
    print("new user connected", flush=True)
    try:
        async for message in websocket:
            if isinstance(message, str):
                messagejson = json.loads(message)
                if "is_speaking" in messagejson:
                    websocket.is_speaking = messagejson["is_speaking"]
                    websocket.param_dict_asr_online["is_final"] = not websocket.is_speaking
                if "chunk_interval" in messagejson:
                    websocket.chunk_interval = messagejson["chunk_interval"]
                if "wav_name" in messagejson:
                    websocket.wav_name = messagejson.get("wav_name")
                if "chunk_size" in messagejson:
                    websocket.param_dict_asr_online["chunk_size"] = messagejson["chunk_size"]
            if len(frames_asr_online) > 0 or len(frames_asr) > 0 or not isinstance(message, str):
                if not isinstance(message, str):
                    frames.append(message)
                    duration_ms = len(message) // 32
                    websocket.vad_pre_idx += duration_ms
                    # asr online
                    frames_asr_online.append(message)
                    if len(frames_asr_online) % websocket.chunk_interval == 0:
                        audio_in = b"".join(frames_asr_online)
                        await async_asr_online(websocket, audio_in)
                        frames_asr_online = []
                    if speech_start:
                        frames_asr.append(message)
                    # vad online
                    speech_start_i, speech_end_i = await async_vad(websocket, message)
                    if speech_start_i:
                        speech_start = True
                        beg_bias = (websocket.vad_pre_idx - speech_start_i) // duration_ms
                        frames_pre = frames[-beg_bias:]
                        frames_asr = []
                        frames_asr.extend(frames_pre)
                # asr punc offline
                if speech_end_i or not websocket.is_speaking:
                    audio_in = b"".join(frames_asr)
                    await async_asr(websocket, audio_in)
                    frames_asr = []
                    speech_start = False
                    frames_asr_online = []
                    websocket.param_dict_asr_online = {"cache": dict()}
                    if not websocket.is_speaking:
                        websocket.vad_pre_idx = 0
                        frames = []
                        websocket.param_dict_vad = {'in_cache': dict()}
                    else:
                        frames = frames[-20:]
    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)
        websocket_users.remove(websocket)
    except websockets.InvalidState:
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)


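# Streaming VAD over a single audio frame. Returns (speech_start, speech_end):
# speech_start is the detected segment start offset in ms (False if no start was
# seen in this frame) and speech_end is True once the current segment has ended.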
async def async_vad(websocket, audio_in):
    segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
    speech_start = False
    speech_end = False
    if len(segments_result) == 0 or len(segments_result["text"]) > 1:
        return speech_start, speech_end
    if segments_result["text"][0][0] != -1:
        speech_start = segments_result["text"][0][0]
    if segments_result["text"][0][1] != -1:
        speech_end = True
    return speech_start, speech_end


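# Second (offline) pass: decode the buffered speech segment with the offline ASR
# model and, if a punctuation model was loaded, restore punctuation before
# sending the "2pass-offline" result to the client.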
async def async_asr(websocket, audio_in):
    if len(audio_in) > 0:
        # print(len(audio_in))
        audio_in = load_bytes(audio_in)
        rec_result = inference_pipeline_asr(audio_in=audio_in,
                                            param_dict=websocket.param_dict_asr)
        # print(rec_result)
        if inference_pipeline_punc is not None and 'text' in rec_result and len(rec_result["text"]) > 0:
            rec_result = inference_pipeline_punc(text_in=rec_result['text'],
                                                 param_dict=websocket.param_dict_punc)
        # print("offline", rec_result)
        message = json.dumps({"mode": "2pass-offline", "text": rec_result["text"], "wav_name": websocket.wav_name})
        await websocket.send(message)


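# First (online) pass: decode the accumulated chunk with the streaming ASR model
# and send a "2pass-online" partial result, skipping the model's "sil" and
# "waiting_for_more_voice" placeholders; the incremental cache is cleared once
# the utterance is marked final.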
async def async_asr_online(websocket, audio_in):
    if len(audio_in) > 0:
        audio_in = load_bytes(audio_in)
        rec_result = inference_pipeline_asr_online(audio_in=audio_in,
                                                   param_dict=websocket.param_dict_asr_online)
        if websocket.param_dict_asr_online.get("is_final", False):
            websocket.param_dict_asr_online["cache"] = dict()
        if "text" in rec_result:
            if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
                # print("online", rec_result)
                message = json.dumps({"mode": "2pass-online", "text": rec_result["text"], "wav_name": websocket.wav_name})
                await websocket.send(message)


start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
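
# A minimal client sketch for exercising this server (not part of this file; the
# URI, chunk_size, and chunk_interval values below are illustrative assumptions):
#
#   import asyncio, json, websockets
#
#   async def demo(pcm_chunks):
#       # pcm_chunks: iterable of bytes containing 16 kHz, 16-bit mono PCM audio
#       async with websockets.connect("ws://127.0.0.1:10095",
#                                     subprotocols=["binary"]) as ws:
#           await ws.send(json.dumps({"is_speaking": True, "chunk_interval": 10,
#                                     "chunk_size": [5, 10, 5], "wav_name": "demo"}))
#           for chunk in pcm_chunks:
#               await ws.send(chunk)
#           await ws.send(json.dumps({"is_speaking": False}))
#           async for msg in ws:
#               print(msg)  # "2pass-online" partials, then the "2pass-offline" result
#               if json.loads(msg)["mode"] == "2pass-offline":
#                   break
#
#   # asyncio.run(demo(chunks))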