// paraformer.cpp
  1. /**
  2. * Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
  3. * MIT License (https://opensource.org/licenses/MIT)
  4. */
  5. #include "precomp.h"
  6. #include "paraformer.h"
  7. #include "encode_converter.h"
  8. #include <cstddef>
  9. using namespace std;
  10. namespace funasr {
// Default constructor: create the ONNX Runtime environments for the acoustic
// model and the hotword compiler. Hotword support is off until
// InitHwCompiler() is called.
Paraformer::Paraformer()
:use_hotword(false),
env_(ORT_LOGGING_LEVEL_ERROR, "paraformer"),session_options_{},
hw_env_(ORT_LOGGING_LEVEL_ERROR, "paraformer_hw"),hw_session_options{} {
}
// offline
// Initialize the non-streaming (offline) recognizer: load the YAML config,
// configure fbank feature extraction, create the ONNX session, and cache the
// model's input/output names plus the vocabulary, phone set and CMVN stats.
void Paraformer::InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
    // Read sample rate / language from the model's YAML config.
    LoadConfigFromYaml(am_config.c_str());
    // knf options: Kaldi-native-fbank settings driven by the config values.
    fbank_opts_.frame_opts.dither = 0;
    fbank_opts_.mel_opts.num_bins = n_mels;
    fbank_opts_.frame_opts.samp_freq = asr_sample_rate;
    fbank_opts_.frame_opts.window_type = window_type;
    fbank_opts_.frame_opts.frame_shift_ms = frame_shift;
    fbank_opts_.frame_opts.frame_length_ms = frame_length;
    fbank_opts_.energy_floor = 0;
    fbank_opts_.mel_opts.debug_mel = false;
    // fbank_ = std::make_unique<knf::OnlineFbank>(fbank_opts);

    // session_options_.SetInterOpNumThreads(1);
    session_options_.SetIntraOpNumThreads(thread_num);
    session_options_.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
    // DisableCpuMemArena can improve performance
    session_options_.DisableCpuMemArena();

    try {
        m_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(am_model).c_str(), session_options_);
        LOG(INFO) << "Successfully load model from " << am_model;
    } catch (std::exception const &e) {
        // Model loading is fatal; nothing else can work without a session.
        LOG(ERROR) << "Error when load am onnx model: " << e.what();
        exit(-1);
    }

    // Cache input names: index 0 and 1 always; index 2 only for hotword models.
    // NOTE(review): this assumes InitHwCompiler() (which sets use_hotword) ran
    // before this function when a hotword model is used — confirm call order.
    string strName;
    GetInputName(m_session_.get(), strName);
    m_strInputNames.push_back(strName.c_str());
    GetInputName(m_session_.get(), strName,1);
    m_strInputNames.push_back(strName);
    if (use_hotword) {
        GetInputName(m_session_.get(), strName, 2);
        m_strInputNames.push_back(strName);
    }
    // Cache every output name (timestamp-capable models expose extra outputs).
    size_t numOutputNodes = m_session_->GetOutputCount();
    for(int index=0; index<numOutputNodes; index++){
        GetOutputName(m_session_.get(), strName, index);
        m_strOutputNames.push_back(strName);
    }
    // Build raw char* views for the ORT Run() API. These pointers stay valid
    // only because the owning string vectors are never modified afterwards.
    for (auto& item : m_strInputNames)
        m_szInputNames.push_back(item.c_str());
    for (auto& item : m_strOutputNames)
        m_szOutputNames.push_back(item.c_str());

    vocab = new Vocab(am_config.c_str());
    phone_set_ = new PhoneSet(am_config.c_str());
    LoadCmvn(am_cmvn.c_str());
}
// online
// Initialize the streaming (online) recognizer: separate encoder and decoder
// ONNX sessions, plus fbank/CMVN configuration shared with the offline path.
void Paraformer::InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
    // Read front-end, encoder, decoder and predictor settings from YAML.
    LoadOnlineConfigFromYaml(am_config.c_str());
    // knf options: Kaldi-native-fbank settings driven by the config values.
    fbank_opts_.frame_opts.dither = 0;
    fbank_opts_.mel_opts.num_bins = n_mels;
    fbank_opts_.frame_opts.samp_freq = asr_sample_rate;
    fbank_opts_.frame_opts.window_type = window_type;
    fbank_opts_.frame_opts.frame_shift_ms = frame_shift;
    fbank_opts_.frame_opts.frame_length_ms = frame_length;
    fbank_opts_.energy_floor = 0;
    fbank_opts_.mel_opts.debug_mel = false;

    // session_options_.SetInterOpNumThreads(1);
    session_options_.SetIntraOpNumThreads(thread_num);
    session_options_.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
    // DisableCpuMemArena can improve performance
    session_options_.DisableCpuMemArena();

    try {
        encoder_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(en_model).c_str(), session_options_);
        LOG(INFO) << "Successfully load model from " << en_model;
    } catch (std::exception const &e) {
        LOG(ERROR) << "Error when load am encoder model: " << e.what();
        exit(-1);
    }
    try {
        decoder_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(de_model).c_str(), session_options_);
        LOG(INFO) << "Successfully load model from " << de_model;
    } catch (std::exception const &e) {
        LOG(ERROR) << "Error when load am decoder model: " << e.what();
        exit(-1);
    }

    // encoder: cache two input names (indices 0-1) and three output names
    // (indices 0-2); the actual names come from the ONNX graph.
    string strName;
    GetInputName(encoder_session_.get(), strName);
    en_strInputNames.push_back(strName.c_str());
    GetInputName(encoder_session_.get(), strName,1);
    en_strInputNames.push_back(strName);
    GetOutputName(encoder_session_.get(), strName);
    en_strOutputNames.push_back(strName);
    GetOutputName(encoder_session_.get(), strName,1);
    en_strOutputNames.push_back(strName);
    GetOutputName(encoder_session_.get(), strName,2);
    en_strOutputNames.push_back(strName);
    // Raw char* views for ORT Run(); valid while the string vectors are untouched.
    for (auto& item : en_strInputNames)
        en_szInputNames_.push_back(item.c_str());
    for (auto& item : en_strOutputNames)
        en_szOutputNames_.push_back(item.c_str());

    // decoder: 4 fixed inputs / 2 fixed outputs plus one FSMN cache tensor per
    // decoder layer (fsmn_layers comes from the YAML config).
    int de_input_len = 4 + fsmn_layers;
    int de_out_len = 2 + fsmn_layers;
    for(int i=0;i<de_input_len; i++){
        GetInputName(decoder_session_.get(), strName, i);
        de_strInputNames.push_back(strName.c_str());
    }
    for(int i=0;i<de_out_len; i++){
        GetOutputName(decoder_session_.get(), strName,i);
        de_strOutputNames.push_back(strName);
    }
    for (auto& item : de_strInputNames)
        de_szInputNames_.push_back(item.c_str());
    for (auto& item : de_strOutputNames)
        de_szOutputNames_.push_back(item.c_str());

    vocab = new Vocab(am_config.c_str());
    phone_set_ = new PhoneSet(am_config.c_str());
    LoadCmvn(am_cmvn.c_str());
}
// 2pass
// Initialize two-pass recognition: the streaming encoder/decoder pair (first
// pass) plus the offline model (second pass) sharing one set of session options.
void Paraformer::InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
    // online: delegates config loading, fbank setup and vocab/CMVN loading.
    InitAsr(en_model, de_model, am_cmvn, am_config, thread_num);
    // offline
    try {
        m_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(am_model).c_str(), session_options_);
        LOG(INFO) << "Successfully load model from " << am_model;
    } catch (std::exception const &e) {
        LOG(ERROR) << "Error when load am onnx model: " << e.what();
        exit(-1);
    }
    // Cache offline-model input names; index 2 exists only for hotword models.
    // NOTE(review): assumes use_hotword was already set by InitHwCompiler —
    // confirm call order at the call site.
    string strName;
    GetInputName(m_session_.get(), strName);
    m_strInputNames.push_back(strName.c_str());
    GetInputName(m_session_.get(), strName,1);
    m_strInputNames.push_back(strName);
    if (use_hotword) {
        GetInputName(m_session_.get(), strName, 2);
        m_strInputNames.push_back(strName);
    }
    // support time stamp
    size_t numOutputNodes = m_session_->GetOutputCount();
    for(int index=0; index<numOutputNodes; index++){
        GetOutputName(m_session_.get(), strName, index);
        m_strOutputNames.push_back(strName);
    }
    // Raw char* views for ORT Run(); valid while the string vectors are untouched.
    for (auto& item : m_strInputNames)
        m_szInputNames.push_back(item.c_str());
    for (auto& item : m_strOutputNames)
        m_szOutputNames.push_back(item.c_str());
}
  161. void Paraformer::InitLm(const std::string &lm_file,
  162. const std::string &lm_cfg_file,
  163. const std::string &lex_file) {
  164. try {
  165. lm_ = std::shared_ptr<fst::Fst<fst::StdArc>>(
  166. fst::Fst<fst::StdArc>::Read(lm_file));
  167. if (lm_){
  168. lm_vocab = new Vocab(lm_cfg_file.c_str(), lex_file.c_str());
  169. LOG(INFO) << "Successfully load lm file " << lm_file;
  170. }else{
  171. LOG(ERROR) << "Failed to load lm file " << lm_file;
  172. }
  173. } catch (std::exception const &e) {
  174. LOG(ERROR) << "Error when load lm file: " << e.what();
  175. exit(0);
  176. }
  177. }
  178. void Paraformer::LoadConfigFromYaml(const char* filename){
  179. YAML::Node config;
  180. try{
  181. config = YAML::LoadFile(filename);
  182. }catch(exception const &e){
  183. LOG(ERROR) << "Error loading file, yaml file error or not exist.";
  184. exit(-1);
  185. }
  186. try{
  187. YAML::Node frontend_conf = config["frontend_conf"];
  188. this->asr_sample_rate = frontend_conf["fs"].as<int>();
  189. YAML::Node lang_conf = config["lang"];
  190. if (lang_conf.IsDefined()){
  191. language = lang_conf.as<string>();
  192. }
  193. }catch(exception const &e){
  194. LOG(ERROR) << "Error when load argument from vad config YAML.";
  195. exit(-1);
  196. }
  197. }
  198. void Paraformer::LoadOnlineConfigFromYaml(const char* filename){
  199. YAML::Node config;
  200. try{
  201. config = YAML::LoadFile(filename);
  202. }catch(exception const &e){
  203. LOG(ERROR) << "Error loading file, yaml file error or not exist.";
  204. exit(-1);
  205. }
  206. try{
  207. YAML::Node frontend_conf = config["frontend_conf"];
  208. YAML::Node encoder_conf = config["encoder_conf"];
  209. YAML::Node decoder_conf = config["decoder_conf"];
  210. YAML::Node predictor_conf = config["predictor_conf"];
  211. this->window_type = frontend_conf["window"].as<string>();
  212. this->n_mels = frontend_conf["n_mels"].as<int>();
  213. this->frame_length = frontend_conf["frame_length"].as<int>();
  214. this->frame_shift = frontend_conf["frame_shift"].as<int>();
  215. this->lfr_m = frontend_conf["lfr_m"].as<int>();
  216. this->lfr_n = frontend_conf["lfr_n"].as<int>();
  217. this->encoder_size = encoder_conf["output_size"].as<int>();
  218. this->fsmn_dims = encoder_conf["output_size"].as<int>();
  219. this->fsmn_layers = decoder_conf["num_blocks"].as<int>();
  220. this->fsmn_lorder = decoder_conf["kernel_size"].as<int>()-1;
  221. this->cif_threshold = predictor_conf["threshold"].as<double>();
  222. this->tail_alphas = predictor_conf["tail_threshold"].as<double>();
  223. this->asr_sample_rate = frontend_conf["fs"].as<int>();
  224. }catch(exception const &e){
  225. LOG(ERROR) << "Error when load argument from vad config YAML.";
  226. exit(-1);
  227. }
  228. }
// Load the hotword-embedding compiler model (CLAS). Calling this marks the
// whole recognizer as a hotword model (use_hotword), which makes the acoustic
// model expect a third "hotword embedding" input tensor.
void Paraformer::InitHwCompiler(const std::string &hw_model, int thread_num) {
    hw_session_options.SetIntraOpNumThreads(thread_num);
    hw_session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
    // DisableCpuMemArena can improve performance
    hw_session_options.DisableCpuMemArena();

    try {
        hw_m_session = std::make_unique<Ort::Session>(hw_env_, ORTSTRING(hw_model).c_str(), hw_session_options);
        LOG(INFO) << "Successfully load model from " << hw_model;
    } catch (std::exception const &e) {
        LOG(ERROR) << "Error when load hw compiler onnx model: " << e.what();
        exit(-1);
    }

    // Single input (hotword id matrix) and single output (embeddings).
    string strName;
    GetInputName(hw_m_session.get(), strName);
    hw_m_strInputNames.push_back(strName.c_str());
    //GetInputName(hw_m_session.get(), strName,1);
    //hw_m_strInputNames.push_back(strName);
    GetOutputName(hw_m_session.get(), strName);
    hw_m_strOutputNames.push_back(strName);
    // Raw char* views for ORT Run(); valid while the string vectors are untouched.
    for (auto& item : hw_m_strInputNames)
        hw_m_szInputNames.push_back(item.c_str());
    for (auto& item : hw_m_strOutputNames)
        hw_m_szOutputNames.push_back(item.c_str());
    // if init hotword compiler is called, this is a hotword paraformer model
    use_hotword = true;
}
// Load the word->token segmentation dictionary used to split English hotwords
// into model tokens (see CompileHotwordEmbedding).
void Paraformer::InitSegDict(const std::string &seg_dict_model) {
    seg_dict = new SegDict(seg_dict_model.c_str());
}
  258. Paraformer::~Paraformer()
  259. {
  260. if(vocab){
  261. delete vocab;
  262. }
  263. if(lm_vocab){
  264. delete lm_vocab;
  265. }
  266. if(seg_dict){
  267. delete seg_dict;
  268. }
  269. if(phone_set_){
  270. delete phone_set_;
  271. }
  272. }
// Intentionally empty: the offline model keeps no per-utterance state.
// (Streaming state, if any, lives outside this class.)
void Paraformer::StartUtterance()
{
}
// Intentionally empty: nothing to finalize at utterance end for this model.
void Paraformer::EndUtterance()
{
}
// Intentionally empty: this class holds no mutable decoding state to reset.
void Paraformer::Reset()
{
}
  282. void Paraformer::FbankKaldi(float sample_rate, const float* waves, int len, std::vector<std::vector<float>> &asr_feats) {
  283. knf::OnlineFbank fbank_(fbank_opts_);
  284. std::vector<float> buf(len);
  285. for (int32_t i = 0; i != len; ++i) {
  286. buf[i] = waves[i] * 32768;
  287. }
  288. fbank_.AcceptWaveform(sample_rate, buf.data(), buf.size());
  289. int32_t frames = fbank_.NumFramesReady();
  290. for (int32_t i = 0; i != frames; ++i) {
  291. const float *frame = fbank_.GetFrame(i);
  292. std::vector<float> frame_vector(frame, frame + fbank_opts_.mel_opts.num_bins);
  293. asr_feats.emplace_back(frame_vector);
  294. }
  295. }
  296. void Paraformer::LoadCmvn(const char *filename)
  297. {
  298. ifstream cmvn_stream(filename);
  299. if (!cmvn_stream.is_open()) {
  300. LOG(ERROR) << "Failed to open file: " << filename;
  301. exit(-1);
  302. }
  303. string line;
  304. while (getline(cmvn_stream, line)) {
  305. istringstream iss(line);
  306. vector<string> line_item{istream_iterator<string>{iss}, istream_iterator<string>{}};
  307. if (line_item[0] == "<AddShift>") {
  308. getline(cmvn_stream, line);
  309. istringstream means_lines_stream(line);
  310. vector<string> means_lines{istream_iterator<string>{means_lines_stream}, istream_iterator<string>{}};
  311. if (means_lines[0] == "<LearnRateCoef>") {
  312. for (int j = 3; j < means_lines.size() - 1; j++) {
  313. means_list_.push_back(stof(means_lines[j]));
  314. }
  315. continue;
  316. }
  317. }
  318. else if (line_item[0] == "<Rescale>") {
  319. getline(cmvn_stream, line);
  320. istringstream vars_lines_stream(line);
  321. vector<string> vars_lines{istream_iterator<string>{vars_lines_stream}, istream_iterator<string>{}};
  322. if (vars_lines[0] == "<LearnRateCoef>") {
  323. for (int j = 3; j < vars_lines.size() - 1; j++) {
  324. vars_list_.push_back(stof(vars_lines[j])*scale);
  325. }
  326. continue;
  327. }
  328. }
  329. }
  330. }
  331. string Paraformer::GreedySearch(float * in, int n_len, int64_t token_nums, bool is_stamp, std::vector<float> us_alphas, std::vector<float> us_cif_peak)
  332. {
  333. vector<int> hyps;
  334. int Tmax = n_len;
  335. for (int i = 0; i < Tmax; i++) {
  336. int max_idx;
  337. float max_val;
  338. FindMax(in + i * token_nums, token_nums, max_val, max_idx);
  339. hyps.push_back(max_idx);
  340. }
  341. if(!is_stamp){
  342. return vocab->Vector2StringV2(hyps, language);
  343. }else{
  344. std::vector<string> char_list;
  345. std::vector<std::vector<float>> timestamp_list;
  346. std::string res_str;
  347. vocab->Vector2String(hyps, char_list);
  348. std::vector<string> raw_char(char_list);
  349. TimestampOnnx(us_alphas, us_cif_peak, char_list, res_str, timestamp_list);
  350. return PostProcess(raw_char, timestamp_list);
  351. }
  352. }
// Thin wrapper: run WFST beam search over the posteriors [len, token_nums].
// Returns the (partial) hypothesis; FinalizeDecode produces the final one.
string Paraformer::BeamSearch(WfstDecoder* &wfst_decoder, float *in, int len, int64_t token_nums)
{
    return wfst_decoder->Search(in, len, token_nums);
}
// Thin wrapper: finish WFST decoding, optionally attaching timestamps derived
// from the CIF alphas/peaks when is_stamp is true.
string Paraformer::FinalizeDecode(WfstDecoder* &wfst_decoder,
    bool is_stamp, std::vector<float> us_alphas, std::vector<float> us_cif_peak)
{
    return wfst_decoder->FinalizeDecode(is_stamp, us_alphas, us_cif_peak);
}
// Apply Low Frame Rate stacking and CMVN in place: every output frame is the
// concatenation of lfr_m consecutive input frames, advancing lfr_n frames per
// step, then shifted by means_list_ and scaled by vars_list_.
// Note: T_lrf is computed from the ORIGINAL frame count before padding.
void Paraformer::LfrCmvn(std::vector<std::vector<float>> &asr_feats) {
    std::vector<std::vector<float>> out_feats;
    int T = asr_feats.size();
    int T_lrf = ceil(1.0 * T / lfr_n);
    // Pad frames at start(copy first frame)
    for (int i = 0; i < (lfr_m - 1) / 2; i++) {
        asr_feats.insert(asr_feats.begin(), asr_feats[0]);
    }
    // Merge lfr_m frames as one,lfr_n frames per window
    T = T + (lfr_m - 1) / 2;
    std::vector<float> p;
    for (int i = 0; i < T_lrf; i++) {
        if (lfr_m <= T - i * lfr_n) {
            // Full window available: concatenate lfr_m frames.
            for (int j = 0; j < lfr_m; j++) {
                p.insert(p.end(), asr_feats[i * lfr_n + j].begin(), asr_feats[i * lfr_n + j].end());
            }
            out_feats.emplace_back(p);
            p.clear();
        } else {
            // Fill to lfr_m frames at last window if less than lfr_m frames (copy last frame)
            int num_padding = lfr_m - (T - i * lfr_n);
            for (int j = 0; j < (asr_feats.size() - i * lfr_n); j++) {
                p.insert(p.end(), asr_feats[i * lfr_n + j].begin(), asr_feats[i * lfr_n + j].end());
            }
            for (int j = 0; j < num_padding; j++) {
                p.insert(p.end(), asr_feats[asr_feats.size() - 1].begin(), asr_feats[asr_feats.size() - 1].end());
            }
            out_feats.emplace_back(p);
            p.clear();
        }
    }
    // Apply cmvn: vars_list_ already carries the global scale (see LoadCmvn).
    for (auto &out_feat: out_feats) {
        for (int j = 0; j < means_list_.size(); j++) {
            out_feat[j] = (out_feat[j] + means_list_[j]) * vars_list_[j];
        }
    }
    asr_feats = out_feats;
}
// Run one offline recognition pass over a buffer of audio.
// Pipeline: fbank -> LFR+CMVN -> ONNX acoustic model -> greedy search (no LM)
// or WFST beam search (with LM), with an extra timestamp path when the model
// exposes CIF alpha/peak outputs (4 outputs instead of 2).
// @param din            normalized audio samples
// @param len            number of samples
// @param input_finished when true and an LM is loaded, finalize WFST decoding
// @param hw_emb         hotword embeddings (required when use_hotword)
// @param decoder_handle opaque WfstDecoder* used only on the LM path
// @return decoded text, or "" when there is no feature/hotword input or on error
string Paraformer::Forward(float* din, int len, bool input_finished, const std::vector<std::vector<float>> &hw_emb, void* decoder_handle)
{
    WfstDecoder* wfst_decoder = (WfstDecoder*)decoder_handle;
    int32_t in_feat_dim = fbank_opts_.mel_opts.num_bins;

    // Front end: fbank then LFR stacking + CMVN.
    std::vector<std::vector<float>> asr_feats;
    FbankKaldi(asr_sample_rate, din, len, asr_feats);
    if(asr_feats.size() == 0){
        // Input too short to produce a single frame.
        return "";
    }
    LfrCmvn(asr_feats);
    int32_t feat_dim = lfr_m*in_feat_dim;
    int32_t num_frames = asr_feats.size();

    // Flatten [num_frames][feat_dim] into one contiguous buffer for ORT.
    std::vector<float> wav_feats;
    for (const auto &frame_feat: asr_feats) {
        wav_feats.insert(wav_feats.end(), frame_feat.begin(), frame_feat.end());
    }

#ifdef _WIN_X86
    Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
#else
    Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
#endif

    // Input 0: speech tensor [1, num_frames, feat_dim] (borrows wav_feats).
    const int64_t input_shape_[3] = {1, num_frames, feat_dim};
    Ort::Value onnx_feats = Ort::Value::CreateTensor<float>(m_memoryInfo,
        wav_feats.data(),
        wav_feats.size(),
        input_shape_,
        3);
    // Input 1: speech_lengths tensor [1].
    const int64_t paraformer_length_shape[1] = {1};
    std::vector<int32_t> paraformer_length;
    paraformer_length.emplace_back(num_frames);
    Ort::Value onnx_feats_len = Ort::Value::CreateTensor<int32_t>(
        m_memoryInfo, paraformer_length.data(), paraformer_length.size(), paraformer_length_shape, 1);

    std::vector<Ort::Value> input_onnx;
    input_onnx.emplace_back(std::move(onnx_feats));
    input_onnx.emplace_back(std::move(onnx_feats_len));

    // Input 2 (hotword models only): embedding tensor [1, num_hotwords, dim].
    // `embedding` must outlive Run() since the tensor borrows its storage.
    std::vector<float> embedding;
    try{
        if (use_hotword) {
            if(hw_emb.size()<=0){
                LOG(ERROR) << "hw_emb is null";
                return "";
            }
            //PrintMat(hw_emb, "input_clas_emb");
            const int64_t hotword_shape[3] = {1, static_cast<int64_t>(hw_emb.size()), static_cast<int64_t>(hw_emb[0].size())};
            embedding.reserve(hw_emb.size() * hw_emb[0].size());
            for (auto item : hw_emb) {
                embedding.insert(embedding.end(), item.begin(), item.end());
            }
            //LOG(INFO) << "hotword shape " << hotword_shape[0] << " " << hotword_shape[1] << " " << hotword_shape[2] << " size " << embedding.size();
            Ort::Value onnx_hw_emb = Ort::Value::CreateTensor<float>(
                m_memoryInfo, embedding.data(), embedding.size(), hotword_shape, 3);
            input_onnx.emplace_back(std::move(onnx_hw_emb));
        }
    }catch (std::exception const &e)
    {
        LOG(ERROR)<<e.what();
        return "";
    }

    string result="";
    try {
        auto outputTensor = m_session_->Run(Ort::RunOptions{nullptr}, m_szInputNames.data(), input_onnx.data(), input_onnx.size(), m_szOutputNames.data(), m_szOutputNames.size());
        // Output 0: posteriors [1, T, vocab]; output 1: decoded length.
        std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
        //LOG(INFO) << "paraformer out shape " << outputShape[0] << " " << outputShape[1] << " " << outputShape[2];
        int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
        float* floatData = outputTensor[0].GetTensorMutableData<float>();
        auto encoder_out_lens = outputTensor[1].GetTensorMutableData<int64_t>();
        // timestamp
        if(outputTensor.size() == 4){
            // Outputs 2/3: CIF alphas and peaks, copied out for timestamping.
            std::vector<int64_t> us_alphas_shape = outputTensor[2].GetTensorTypeAndShapeInfo().GetShape();
            float* us_alphas_data = outputTensor[2].GetTensorMutableData<float>();
            std::vector<float> us_alphas(us_alphas_shape[1]);
            for (int i = 0; i < us_alphas_shape[1]; i++) {
                us_alphas[i] = us_alphas_data[i];
            }
            std::vector<int64_t> us_peaks_shape = outputTensor[3].GetTensorTypeAndShapeInfo().GetShape();
            float* us_peaks_data = outputTensor[3].GetTensorMutableData<float>();
            std::vector<float> us_peaks(us_peaks_shape[1]);
            for (int i = 0; i < us_peaks_shape[1]; i++) {
                us_peaks[i] = us_peaks_data[i];
            }
            if (lm_ == nullptr) {
                result = GreedySearch(floatData, *encoder_out_lens, outputShape[2], true, us_alphas, us_peaks);
            } else {
                result = BeamSearch(wfst_decoder, floatData, *encoder_out_lens, outputShape[2]);
                if (input_finished) {
                    result = FinalizeDecode(wfst_decoder, true, us_alphas, us_peaks);
                }
            }
        }else{
            // No timestamp outputs: plain greedy or LM beam search.
            if (lm_ == nullptr) {
                result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
            } else {
                result = BeamSearch(wfst_decoder, floatData, *encoder_out_lens, outputShape[2]);
                if (input_finished) {
                    result = FinalizeDecode(wfst_decoder);
                }
            }
        }
    }
    catch (std::exception const &e)
    {
        // Inference failure is non-fatal: log and return whatever was decoded.
        LOG(ERROR)<<e.what();
    }
    return result;
}
// Compile a space-separated hotword string into per-hotword embedding vectors
// via the CLAS hotword model. Each hotword is tokenized (Chinese: per
// character; otherwise: seg_dict word->token lookup), mapped to phone ids,
// zero-padded to max_hotword_len, and a "blank" row is always appended. The
// model output is [max_hotword_len, hotword_size, dim]; the embedding for each
// hotword is taken at its last real token position.
// For non-hotword models, returns a single all-zero embedding row.
std::vector<std::vector<float>> Paraformer::CompileHotwordEmbedding(std::string &hotwords) {
    int embedding_dim = encoder_size;
    std::vector<std::vector<float>> hw_emb;
    if (!use_hotword) {
        // Placeholder so callers always receive a non-empty matrix.
        std::vector<float> vec(embedding_dim, 0);
        hw_emb.push_back(vec);
        return hw_emb;
    }
    int max_hotword_len = 10;
    std::vector<int32_t> hotword_matrix;
    std::vector<int32_t> lengths;
    int hotword_size = 1;
    int real_hw_size = 0;
    if (!hotwords.empty()) {
        std::vector<std::string> hotword_array = split(hotwords, ' ');
        hotword_size = hotword_array.size() + 1;
        hotword_matrix.reserve(hotword_size * max_hotword_len);
        for (auto hotword : hotword_array) {
            std::vector<std::string> chars;
            if (EncodeConverter::IsAllChineseCharactor((const U8CHAR_T*)hotword.c_str(), hotword.size())) {
                KeepChineseCharacterAndSplit(hotword, chars);
            } else {
                // for english
                std::vector<std::string> words = split(hotword, ' ');
                for (auto word : words) {
                    std::vector<string> tokens = seg_dict->GetTokensByWord(word);
                    chars.insert(chars.end(), tokens.begin(), tokens.end());
                }
            }
            if(chars.size()==0){
                continue;
            }
            // Map tokens to phone ids, truncated/padded to max_hotword_len.
            std::vector<int32_t> hw_vector(max_hotword_len, 0);
            int vector_len = std::min(max_hotword_len, (int)chars.size());
            int chs_oov = false;
            for (int i=0; i<vector_len; i++) {
                hw_vector[i] = phone_set_->String2Id(chars[i]);
                if(hw_vector[i] == -1){
                    chs_oov = true;
                    break;
                }
            }
            if(chs_oov){
                // Skip hotwords containing out-of-vocabulary tokens.
                LOG(INFO) << "OOV: " << hotword;
                continue;
            }
            LOG(INFO) << hotword;
            lengths.push_back(vector_len);
            real_hw_size += 1;
            hotword_matrix.insert(hotword_matrix.end(), hw_vector.begin(), hw_vector.end());
        }
        hotword_size = real_hw_size + 1;
    }
    // Always append a blank row (id 1, length 1) as the "no hotword" entry.
    std::vector<int32_t> blank_vec(max_hotword_len, 0);
    blank_vec[0] = 1;
    hotword_matrix.insert(hotword_matrix.end(), blank_vec.begin(), blank_vec.end());
    lengths.push_back(1);

#ifdef _WIN_X86
    Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
#else
    Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
#endif
    // Model input: id matrix [hotword_size, max_hotword_len].
    const int64_t input_shape_[2] = {hotword_size, max_hotword_len};
    Ort::Value onnx_hotword = Ort::Value::CreateTensor<int32_t>(m_memoryInfo,
        (int32_t*)hotword_matrix.data(),
        hotword_size * max_hotword_len,
        input_shape_,
        2);
    LOG(INFO) << "clas shape " << hotword_size << " " << max_hotword_len << std::endl;

    std::vector<Ort::Value> input_onnx;
    input_onnx.emplace_back(std::move(onnx_hotword));

    std::vector<std::vector<float>> result;
    try {
        auto outputTensor = hw_m_session->Run(Ort::RunOptions{nullptr}, hw_m_szInputNames.data(), input_onnx.data(), input_onnx.size(), hw_m_szOutputNames.data(), hw_m_szOutputNames.size());
        std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
        int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
        float* floatData = outputTensor[0].GetTensorMutableData<float>(); // shape [max_hotword_len, hotword_size, dim]
        // get embedding by real hotword length
        assert(outputShape[0] == max_hotword_len);
        assert(outputShape[1] == hotword_size);
        embedding_dim = outputShape[2];
        for (int j = 0; j < hotword_size; j++)
        {
            // Flattened index of element [lengths[j]-1, j, 0] in the output:
            // the hidden state at the hotword's last real token.
            int start_pos = hotword_size * (lengths[j] - 1) * embedding_dim + j * embedding_dim;
            std::vector<float> embedding;
            embedding.insert(embedding.begin(), floatData + start_pos, floatData + start_pos + embedding_dim);
            result.push_back(embedding);
        }
    }
    catch (std::exception const &e)
    {
        // On inference failure, return an empty matrix; caller treats it as "no hotwords".
        LOG(ERROR)<<e.what();
    }
    //PrintMat(result, "clas_embedding_output");
    return result;
}
// Accessor for the ASR vocabulary (non-owning; may be null before InitAsr).
Vocab* Paraformer::GetVocab()
{
    return vocab;
}
// Accessor for the LM vocabulary (non-owning; may be null before InitLm).
Vocab* Paraformer::GetLmVocab()
{
    return lm_vocab;
}
// Accessor for the phone set (non-owning; may be null before InitAsr).
PhoneSet* Paraformer::GetPhoneSet()
{
    return phone_set_;
}
// Not implemented for this model; logs an error and returns an empty string.
string Paraformer::Rescoring()
{
    LOG(ERROR)<<"Not Imp!!!!!!";
    return "";
}
  619. } // namespace funasr