# ws_server_offline.py
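"""Offline ASR websocket server.

Loads ModelScope ASR, VAD, and (optionally) punctuation pipelines, then serves
websocket clients: binary frames are treated as raw PCM audio and segmented by
the streaming VAD; each finished segment is recognized, punctuated, and sent
back to the client as JSON.
"""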

import asyncio
import json
import logging
import tracemalloc

import websockets
from parse_args import args
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes

tracemalloc.start()  # trace memory allocations for debugging

# Silence ModelScope logging so only the server's own prints show up.
logger = get_logger(log_level=logging.CRITICAL)
logger.setLevel(logging.CRITICAL)

websocket_users = set()

print("model loading")

# Speech recognition pipeline.
inference_pipeline_asr = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision=None)

# Voice activity detection pipeline (streaming mode).
inference_pipeline_vad = pipeline(
    task=Tasks.voice_activity_detection,
    model=args.vad_model,
    model_revision=None,
    output_dir=None,
    batch_size=1,
    mode='online',
    ngpu=args.ngpu,
    ncpu=args.ncpu,
)

# Punctuation restoration pipeline (optional).
if args.punc_model != "":
    inference_pipeline_punc = pipeline(
        task=Tasks.punctuation,
        model=args.punc_model,
        model_revision=None,
        ngpu=args.ngpu,
        ncpu=args.ncpu,
    )
else:
    inference_pipeline_punc = None

print("model loaded")

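# Example launch (a sketch; the flag names are inferred from the args
# attributes used above and are actually defined in parse_args.py -- check
# that file for the real names and defaults):
#   python ws_server_offline.py --host 0.0.0.0 --port 10095 \
#       --asr_model <modelscope ASR model id> \
#       --vad_model <modelscope VAD model id> \
#       --punc_model <modelscope punctuation model id> \
#       --ngpu 1 --ncpu 4
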
async def ws_serve(websocket, path):
    global websocket_users
    frames = []       # rolling buffer of recent audio chunks (for VAD look-back)
    frames_asr = []   # audio chunks belonging to the current speech segment
    websocket_users.add(websocket)
    websocket.param_dict_asr = {}
    websocket.param_dict_vad = {'in_cache': dict(), "is_final": False}
    websocket.param_dict_punc = {'cache': list()}
    websocket.vad_pre_idx = 0     # total audio received so far, in ms
    websocket.is_speaking = True  # assume speech until the client says otherwise
    websocket.wav_name = "microphone"
    speech_start = False
    speech_end_i = False  # defined up front so the flush check is safe on text-only messages
    print("new user connected", flush=True)
    try:
        async for message in websocket:
            # Text frames carry JSON control messages.
            if isinstance(message, str):
                messagejson = json.loads(message)
                if "is_speaking" in messagejson:
                    websocket.is_speaking = messagejson["is_speaking"]
                    websocket.param_dict_vad["is_final"] = not websocket.is_speaking
                if "wav_name" in messagejson:
                    websocket.wav_name = messagejson.get("wav_name")
            # Binary frames carry raw PCM audio (16 kHz, 16-bit mono: 32 bytes/ms).
            if len(frames_asr) > 0 or not isinstance(message, str):
                if not isinstance(message, str):
                    frames.append(message)
                    duration_ms = len(message) // 32
                    websocket.vad_pre_idx += duration_ms
                    if speech_start:
                        frames_asr.append(message)
                    speech_start_i, speech_end_i = await async_vad(websocket, message)
                    if speech_start_i:
                        # Speech onset detected: back-fill the chunks received
                        # since the detected start time.
                        speech_start = True
                        beg_bias = (websocket.vad_pre_idx - speech_start_i) // duration_ms
                        frames_pre = frames[-beg_bias:]
                        frames_asr = []
                        frames_asr.extend(frames_pre)
                # Flush the segment when VAD sees its end, or the client stops speaking.
                if speech_end_i or not websocket.is_speaking:
                    audio_in = b"".join(frames_asr)
                    await async_asr(websocket, audio_in)
                    frames_asr = []
                    speech_start = False
                    speech_end_i = False
                    if not websocket.is_speaking:
                        # End of utterance: reset all per-connection state.
                        websocket.vad_pre_idx = 0
                        frames = []
                        websocket.param_dict_vad = {'in_cache': dict()}
                    else:
                        # Keep a short tail of audio for the next VAD look-back.
                        frames = frames[-20:]
    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)
        websocket_users.remove(websocket)
    except websockets.InvalidState:
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)

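# Wire protocol implemented by ws_serve above, for reference:
#   client -> server (text):   JSON control messages, e.g.
#       {"is_speaking": true, "wav_name": "demo.wav"}   start of a stream
#       {"is_speaking": false}                          end of utterance, forces a flush
#   client -> server (binary): raw PCM audio chunks
#   server -> client (text):   JSON results, e.g.
#       {"mode": "offline", "text": "...", "wav_name": "demo.wav"}
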
async def async_vad(websocket, audio_in):
    segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
    speech_start = False
    speech_end = False
    # An empty result or more than one pending segment means no clean boundary yet.
    if len(segments_result) == 0 or len(segments_result["text"]) > 1:
        return speech_start, speech_end
    if segments_result["text"][0][0] != -1:
        speech_start = segments_result["text"][0][0]  # segment start time, in ms
    if segments_result["text"][0][1] != -1:
        speech_end = True
    return speech_start, speech_end

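# As used above, the streaming VAD result carries segments in
# segments_result["text"] as [beg_ms, end_ms] pairs, with -1 standing in for a
# boundary that has not been observed yet (e.g. [120, -1] while speech is
# still ongoing). This reading is inferred from the checks above rather than
# from the pipeline's documentation.
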
async def async_asr(websocket, audio_in):
    if len(audio_in) > 0:
        audio_in = load_bytes(audio_in)  # raw PCM bytes -> float sample array
        rec_result = inference_pipeline_asr(audio_in=audio_in,
                                            param_dict=websocket.param_dict_asr)
        print(rec_result)
        # Restore punctuation if a punctuation model was loaded and there is text.
        if inference_pipeline_punc is not None and 'text' in rec_result and len(rec_result["text"]) > 0:
            rec_result = inference_pipeline_punc(text_in=rec_result['text'],
                                                 param_dict=websocket.param_dict_punc)
        # .get() guards against an empty recognition result with no "text" key.
        message = json.dumps({"mode": "offline",
                              "text": rec_result.get("text", ""),
                              "wav_name": websocket.wav_name})
        await websocket.send(message)

# Run the websocket server until interrupted.
start_server = websockets.serve(ws_serve, args.host, args.port,
                                subprotocols=["binary"], ping_interval=None)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
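
# Example client (a sketch, not part of this server). It assumes 16 kHz,
# 16-bit mono PCM -- the len(message)//32 ms arithmetic in ws_serve implies
# 32 bytes of audio per millisecond -- and an example port of 10095; use
# whatever host/port the server was actually started with.
#
#   import asyncio, json, websockets
#
#   async def run(pcm_bytes):
#       uri = "ws://127.0.0.1:10095"
#       async with websockets.connect(uri, subprotocols=["binary"]) as ws:
#           await ws.send(json.dumps({"is_speaking": True, "wav_name": "demo.wav"}))
#           for i in range(0, len(pcm_bytes), 9600):   # ~300 ms per chunk
#               await ws.send(pcm_bytes[i:i + 9600])
#           await ws.send(json.dumps({"is_speaking": False}))
#           print(await ws.recv())  # {"mode": "offline", "text": ..., "wav_name": ...}
#
#   asyncio.get_event_loop().run_until_complete(run(open("demo.pcm", "rb").read()))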