ASR_server_streaming_asr.py (5.2 KB) — streaming ASR WebSocket server.
import argparse
import asyncio
import json
import logging
import threading
import time
import tracemalloc
from queue import Empty, Queue

import numpy as np
import websockets
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
  14. tracemalloc.start()
  15. logger = get_logger(log_level=logging.CRITICAL)
  16. logger.setLevel(logging.CRITICAL)
  17. websocket_users = set() #维护客户端列表
  18. parser = argparse.ArgumentParser()
  19. parser.add_argument("--host",
  20. type=str,
  21. default="0.0.0.0",
  22. required=False,
  23. help="host ip, localhost, 0.0.0.0")
  24. parser.add_argument("--port",
  25. type=int,
  26. default=10095,
  27. required=False,
  28. help="grpc server port")
  29. parser.add_argument("--asr_model",
  30. type=str,
  31. default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
  32. help="model from modelscope")
  33. parser.add_argument("--vad_model",
  34. type=str,
  35. default="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
  36. help="model from modelscope")
  37. parser.add_argument("--punc_model",
  38. type=str,
  39. default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
  40. help="model from modelscope")
  41. parser.add_argument("--ngpu",
  42. type=int,
  43. default=1,
  44. help="0 for cpu, 1 for gpu")
  45. args = parser.parse_args()
  46. print("model loading")
  47. def load_bytes(input):
  48. middle_data = np.frombuffer(input, dtype=np.int16)
  49. middle_data = np.asarray(middle_data)
  50. if middle_data.dtype.kind not in 'iu':
  51. raise TypeError("'middle_data' must be an array of integers")
  52. dtype = np.dtype('float32')
  53. if dtype.kind != 'f':
  54. raise TypeError("'dtype' must be a floating point type")
  55. i = np.iinfo(middle_data.dtype)
  56. abs_max = 2 ** (i.bits - 1)
  57. offset = i.min + abs_max
  58. array = np.frombuffer((middle_data.astype(dtype) - offset) / abs_max, dtype=np.float32)
  59. return array
# Streaming (online) paraformer ASR pipeline, loaded once at startup and
# shared by all connections (each connection keeps its own cache dict).
# NOTE(review): the model name is hard-coded and ignores --asr_model,
# whose default is the *offline* large model — confirm this is intended.
inference_pipeline_asr_online = pipeline(
    task=Tasks.auto_speech_recognition,
    # model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
    model='damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online',
    model_revision=None)
print("model loaded")
  66. async def ws_serve(websocket, path):
  67. frames_online = []
  68. global websocket_users
  69. websocket.send_msg = Queue()
  70. websocket_users.add(websocket)
  71. websocket.param_dict_asr_online = {"cache": dict()}
  72. websocket.speek_online = Queue()
  73. ss_online = threading.Thread(target=asr_online, args=(websocket,))
  74. ss_online.start()
  75. try:
  76. async for message in websocket:
  77. message = json.loads(message)
  78. audio = bytes(message['audio'], 'ISO-8859-1')
  79. chunk = message["chunk"]
  80. chunk_num = 500//chunk
  81. is_speaking = message["is_speaking"]
  82. websocket.param_dict_asr_online["is_final"] = not is_speaking
  83. frames_online.append(audio)
  84. if len(frames_online) % chunk_num == 0 or not is_speaking:
  85. audio_in = b"".join(frames_online)
  86. websocket.speek_online.put(audio_in)
  87. frames_online = []
  88. if not websocket.send_msg.empty():
  89. await websocket.send(websocket.send_msg.get())
  90. websocket.send_msg.task_done()
  91. except websockets.ConnectionClosed:
  92. print("ConnectionClosed...", websocket_users) # 链接断开
  93. websocket_users.remove(websocket)
  94. except websockets.InvalidState:
  95. print("InvalidState...") # 无效状态
  96. except Exception as e:
  97. print("Exception:", e)
  98. def asr_online(websocket): # ASR推理
  99. global inference_pipeline_asr_online
  100. global websocket_users
  101. while websocket in websocket_users:
  102. if not websocket.speek_online.empty():
  103. audio_in = websocket.speek_online.get()
  104. websocket.speek_online.task_done()
  105. if len(audio_in) > 0:
  106. # print(len(audio_in))
  107. audio_in = load_bytes(audio_in)
  108. # print(audio_in.shape)
  109. print(websocket.param_dict_asr_online["is_final"])
  110. rec_result = inference_pipeline_asr_online(audio_in=audio_in, param_dict=websocket.param_dict_asr_online)
  111. if websocket.param_dict_asr_online["is_final"]:
  112. websocket.param_dict_asr_online["cache"] = dict()
  113. print(rec_result)
  114. if "text" in rec_result:
  115. if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
  116. message = json.dumps({"mode": "online", "text": rec_result["text"]})
  117. websocket.send_msg.put(message) # 存入发送队列 直接调用send发送不了
  118. time.sleep(0.005)
  119. start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
  120. asyncio.get_event_loop().run_until_complete(start_server)
  121. asyncio.get_event_loop().run_forever()