wss_srv_asr.py

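"""Two-pass (streaming + offline) ASR server over WebSocket.

Clients stream 16 kHz, 16-bit mono PCM as binary frames and JSON control
messages as text frames. A streaming VAD segments the audio, a streaming
Paraformer pipeline emits partial "2pass-online" results, and an offline
pipeline (with optional punctuation restoration) emits the final
"2pass-offline" result for each detected utterance.
"""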
import asyncio
import json
import logging
import ssl
import time
import tracemalloc

import numpy as np
import websockets
from parse_args import args
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes

tracemalloc.start()
logger = get_logger(log_level=logging.CRITICAL)
logger.setLevel(logging.CRITICAL)
websocket_users = set()

print("model loading")
# offline ASR pipeline (second pass)
inference_pipeline_asr = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision=None)

# streaming VAD pipeline
inference_pipeline_vad = pipeline(
    task=Tasks.voice_activity_detection,
    model=args.vad_model,
    model_revision=None,
    output_dir=None,
    batch_size=1,
    mode='online',
    ngpu=args.ngpu,
    ncpu=args.ncpu,
)

# optional punctuation restoration for offline results
if args.punc_model != "":
    inference_pipeline_punc = pipeline(
        task=Tasks.punctuation,
        model=args.punc_model,
        model_revision="v1.0.2",
        ngpu=args.ngpu,
        ncpu=args.ncpu,
    )
else:
    inference_pipeline_punc = None

# streaming ASR pipeline (first pass)
inference_pipeline_asr_online = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model_online,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision='v1.0.4',
    update_model='v1.0.4',
    mode='paraformer_streaming')

print("model loaded")
async def ws_serve(websocket, path):
    frames = []             # raw chunks kept for VAD back-fill
    frames_asr = []         # chunks for the offline (second) pass
    frames_asr_online = []  # chunks for the streaming (first) pass
    global websocket_users
    websocket_users.add(websocket)
    websocket.param_dict_asr = {}
    websocket.param_dict_asr_online = {"cache": dict()}
    websocket.param_dict_vad = {'in_cache': dict(), "is_final": False}
    websocket.param_dict_punc = {'cache': list()}
    websocket.vad_pre_idx = 0
    speech_start = False
    speech_end_i = False
    websocket.wav_name = "microphone"
    websocket.mode = "2pass"
    # defaults so audio arriving before the client's config message cannot
    # raise AttributeError (assumed values, matching the usual protocol)
    websocket.is_speaking = True
    websocket.chunk_interval = 10
    print("new user connected", flush=True)
    try:
        async for message in websocket:
            if isinstance(message, str):
                messagejson = json.loads(message)
                if "is_speaking" in messagejson:
                    websocket.is_speaking = messagejson["is_speaking"]
                    websocket.param_dict_asr_online["is_final"] = not websocket.is_speaking
                if "chunk_interval" in messagejson:
                    websocket.chunk_interval = messagejson["chunk_interval"]
                if "wav_name" in messagejson:
                    websocket.wav_name = messagejson.get("wav_name")
                if "chunk_size" in messagejson:
                    websocket.param_dict_asr_online["chunk_size"] = messagejson["chunk_size"]
                if "mode" in messagejson:
                    websocket.mode = messagejson["mode"]
            if len(frames_asr_online) > 0 or len(frames_asr) > 0 or not isinstance(message, str):
                if not isinstance(message, str):
                    frames.append(message)
                    duration_ms = len(message) // 32  # 16 kHz, 16-bit mono PCM: 32 bytes/ms
                    websocket.vad_pre_idx += duration_ms
                    # streaming ASR (first pass)
                    frames_asr_online.append(message)
                    websocket.param_dict_asr_online["is_final"] = speech_end_i
                    if (len(frames_asr_online) % websocket.chunk_interval == 0
                            or websocket.param_dict_asr_online["is_final"]):
                        if websocket.mode == "2pass" or websocket.mode == "online":
                            audio_in = b"".join(frames_asr_online)
                            await async_asr_online(websocket, audio_in)
                        frames_asr_online = []
                    if speech_start:
                        frames_asr.append(message)
                    # streaming VAD
                    speech_start_i, speech_end_i = await async_vad(websocket, message)
                    if speech_start_i:
                        speech_start = True
                        # back-fill the chunks buffered before the detected onset
                        beg_bias = (websocket.vad_pre_idx - speech_start_i) // duration_ms
                        frames_pre = frames[-beg_bias:]
                        frames_asr = []
                        frames_asr.extend(frames_pre)
                # offline ASR + punctuation (second pass) at each VAD end point
                if speech_end_i or not websocket.is_speaking:
                    if websocket.mode == "2pass" or websocket.mode == "offline":
                        audio_in = b"".join(frames_asr)
                        await async_asr(websocket, audio_in)
                    frames_asr = []
                    speech_start = False
                    if not websocket.is_speaking:
                        websocket.vad_pre_idx = 0
                        frames = []
                        websocket.param_dict_vad = {'in_cache': dict()}
                    else:
                        frames = frames[-20:]
    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)
    except websockets.InvalidState:
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)
    finally:
        # drop the connection on every exit path, not only ConnectionClosed
        websocket_users.discard(websocket)
async def async_vad(websocket, audio_in):
    # streaming VAD; "text" carries [[beg_ms, end_ms]] pairs, with -1 for a
    # boundary that is not yet known (inferred from the checks below)
    segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
    speech_start = False
    speech_end = False
    if len(segments_result) == 0 or len(segments_result["text"]) > 1:
        return speech_start, speech_end
    if segments_result["text"][0][0] != -1:
        speech_start = segments_result["text"][0][0]  # onset time in ms
    if segments_result["text"][0][1] != -1:
        speech_end = True
    return speech_start, speech_end
async def async_asr(websocket, audio_in):
    if len(audio_in) > 0:
        audio_in = load_bytes(audio_in)
        rec_result = inference_pipeline_asr(audio_in=audio_in,
                                            param_dict=websocket.param_dict_asr)
        if inference_pipeline_punc is not None and 'text' in rec_result and len(rec_result["text"]) > 0:
            rec_result = inference_pipeline_punc(text_in=rec_result['text'],
                                                 param_dict=websocket.param_dict_punc)
        if 'text' in rec_result:
            message = json.dumps({"mode": "2pass-offline",
                                  "text": rec_result["text"],
                                  "wav_name": websocket.wav_name})
            await websocket.send(message)
async def async_asr_online(websocket, audio_in):
    if len(audio_in) > 0:
        audio_in = load_bytes(audio_in)
        rec_result = inference_pipeline_asr_online(audio_in=audio_in,
                                                   param_dict=websocket.param_dict_asr_online)
        if websocket.mode == "2pass" and websocket.param_dict_asr_online.get("is_final", False):
            # in 2pass mode the final chunk is reported by the offline pass instead
            return
        if "text" in rec_result:
            if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
                message = json.dumps({"mode": "2pass-online",
                                      "text": rec_result["text"],
                                      "wav_name": websocket.wav_name})
                await websocket.send(message)
if len(args.certfile) > 0:
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # Generate with Let's Encrypt, copy to this location, chown to the
    # current user, and set 400 permissions
    ssl_cert = args.certfile
    ssl_key = args.keyfile
    ssl_context.load_cert_chain(ssl_cert, keyfile=ssl_key)
    start_server = websockets.serve(ws_serve, args.host, args.port,
                                    subprotocols=["binary"], ping_interval=None, ssl=ssl_context)
else:
    start_server = websockets.serve(ws_serve, args.host, args.port,
                                    subprotocols=["binary"], ping_interval=None)

asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
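
For reference, a minimal client sketch against the handler above (not part of the original file): the JSON keys (mode, wav_name, is_speaking, chunk_size, chunk_interval) are the ones ws_serve parses, and audio goes out as raw 16 kHz, 16-bit mono PCM in binary frames. The URI, file name, chunk sizing, and chunk_size values are assumptions for illustration.

import asyncio
import json
import wave
import websockets

async def run_client(uri="ws://localhost:10095", wav_path="test.wav"):  # assumed host/port/file
    async with websockets.connect(uri, subprotocols=["binary"], ping_interval=None) as ws:
        # config first: keys mirror what ws_serve reads
        await ws.send(json.dumps({"mode": "2pass", "wav_name": wav_path,
                                  "is_speaking": True, "chunk_size": [5, 10, 5],
                                  "chunk_interval": 10}))
        with wave.open(wav_path, "rb") as f:  # assumed 16 kHz, 16-bit mono
            pcm = f.readframes(f.getnframes())
        step = 1920  # 60 ms of audio at 32 bytes per ms
        for i in range(0, len(pcm), step):
            await ws.send(pcm[i:i + step])
            await asyncio.sleep(0.06)
        await ws.send(json.dumps({"is_speaking": False}))  # triggers the offline pass
        async for msg in ws:
            result = json.loads(msg)
            print(result["mode"], result["text"])
            if result["mode"] == "2pass-offline":
                break  # final result received

asyncio.run(run_client())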