// paraformer.cpp
  1. /**
  2. * Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
  3. * MIT License (https://opensource.org/licenses/MIT)
  4. */
  5. #include "precomp.h"
  6. #include "paraformer.h"
  7. #include "encode_converter.h"
  8. #include <cstddef>
  9. using namespace std;
  10. namespace funasr {
// Default constructor: no hotword model is attached yet (use_hotword=false).
// Creates ERROR-level ONNX Runtime environments for the acoustic model and
// the hotword compiler, with default session options.
Paraformer::Paraformer()
:use_hotword(false),
env_(ORT_LOGGING_LEVEL_ERROR, "paraformer"),session_options_{},
hw_env_(ORT_LOGGING_LEVEL_ERROR, "paraformer_hw"),hw_session_options{} {
}
  16. // offline
  17. void Paraformer::InitAsr(const std::string &am_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
  18. // knf options
  19. fbank_opts_.frame_opts.dither = 0;
  20. fbank_opts_.mel_opts.num_bins = n_mels;
  21. fbank_opts_.frame_opts.samp_freq = MODEL_SAMPLE_RATE;
  22. fbank_opts_.frame_opts.window_type = window_type;
  23. fbank_opts_.frame_opts.frame_shift_ms = frame_shift;
  24. fbank_opts_.frame_opts.frame_length_ms = frame_length;
  25. fbank_opts_.energy_floor = 0;
  26. fbank_opts_.mel_opts.debug_mel = false;
  27. // fbank_ = std::make_unique<knf::OnlineFbank>(fbank_opts);
  28. // session_options_.SetInterOpNumThreads(1);
  29. session_options_.SetIntraOpNumThreads(thread_num);
  30. session_options_.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
  31. // DisableCpuMemArena can improve performance
  32. session_options_.DisableCpuMemArena();
  33. try {
  34. m_session_ = std::make_unique<Ort::Session>(env_, am_model.c_str(), session_options_);
  35. LOG(INFO) << "Successfully load model from " << am_model;
  36. } catch (std::exception const &e) {
  37. LOG(ERROR) << "Error when load am onnx model: " << e.what();
  38. exit(0);
  39. }
  40. string strName;
  41. GetInputName(m_session_.get(), strName);
  42. m_strInputNames.push_back(strName.c_str());
  43. GetInputName(m_session_.get(), strName,1);
  44. m_strInputNames.push_back(strName);
  45. if (use_hotword) {
  46. GetInputName(m_session_.get(), strName, 2);
  47. m_strInputNames.push_back(strName);
  48. }
  49. size_t numOutputNodes = m_session_->GetOutputCount();
  50. for(int index=0; index<numOutputNodes; index++){
  51. GetOutputName(m_session_.get(), strName, index);
  52. m_strOutputNames.push_back(strName);
  53. }
  54. for (auto& item : m_strInputNames)
  55. m_szInputNames.push_back(item.c_str());
  56. for (auto& item : m_strOutputNames)
  57. m_szOutputNames.push_back(item.c_str());
  58. vocab = new Vocab(am_config.c_str());
  59. LoadCmvn(am_cmvn.c_str());
  60. }
  61. // online
  62. void Paraformer::InitAsr(const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
  63. LoadOnlineConfigFromYaml(am_config.c_str());
  64. // knf options
  65. fbank_opts_.frame_opts.dither = 0;
  66. fbank_opts_.mel_opts.num_bins = n_mels;
  67. fbank_opts_.frame_opts.samp_freq = MODEL_SAMPLE_RATE;
  68. fbank_opts_.frame_opts.window_type = window_type;
  69. fbank_opts_.frame_opts.frame_shift_ms = frame_shift;
  70. fbank_opts_.frame_opts.frame_length_ms = frame_length;
  71. fbank_opts_.energy_floor = 0;
  72. fbank_opts_.mel_opts.debug_mel = false;
  73. // session_options_.SetInterOpNumThreads(1);
  74. session_options_.SetIntraOpNumThreads(thread_num);
  75. session_options_.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
  76. // DisableCpuMemArena can improve performance
  77. session_options_.DisableCpuMemArena();
  78. try {
  79. encoder_session_ = std::make_unique<Ort::Session>(env_, en_model.c_str(), session_options_);
  80. LOG(INFO) << "Successfully load model from " << en_model;
  81. } catch (std::exception const &e) {
  82. LOG(ERROR) << "Error when load am encoder model: " << e.what();
  83. exit(0);
  84. }
  85. try {
  86. decoder_session_ = std::make_unique<Ort::Session>(env_, de_model.c_str(), session_options_);
  87. LOG(INFO) << "Successfully load model from " << de_model;
  88. } catch (std::exception const &e) {
  89. LOG(ERROR) << "Error when load am decoder model: " << e.what();
  90. exit(0);
  91. }
  92. // encoder
  93. string strName;
  94. GetInputName(encoder_session_.get(), strName);
  95. en_strInputNames.push_back(strName.c_str());
  96. GetInputName(encoder_session_.get(), strName,1);
  97. en_strInputNames.push_back(strName);
  98. GetOutputName(encoder_session_.get(), strName);
  99. en_strOutputNames.push_back(strName);
  100. GetOutputName(encoder_session_.get(), strName,1);
  101. en_strOutputNames.push_back(strName);
  102. GetOutputName(encoder_session_.get(), strName,2);
  103. en_strOutputNames.push_back(strName);
  104. for (auto& item : en_strInputNames)
  105. en_szInputNames_.push_back(item.c_str());
  106. for (auto& item : en_strOutputNames)
  107. en_szOutputNames_.push_back(item.c_str());
  108. // decoder
  109. int de_input_len = 4 + fsmn_layers;
  110. int de_out_len = 2 + fsmn_layers;
  111. for(int i=0;i<de_input_len; i++){
  112. GetInputName(decoder_session_.get(), strName, i);
  113. de_strInputNames.push_back(strName.c_str());
  114. }
  115. for(int i=0;i<de_out_len; i++){
  116. GetOutputName(decoder_session_.get(), strName,i);
  117. de_strOutputNames.push_back(strName);
  118. }
  119. for (auto& item : de_strInputNames)
  120. de_szInputNames_.push_back(item.c_str());
  121. for (auto& item : de_strOutputNames)
  122. de_szOutputNames_.push_back(item.c_str());
  123. vocab = new Vocab(am_config.c_str());
  124. LoadCmvn(am_cmvn.c_str());
  125. }
  126. // 2pass
  127. void Paraformer::InitAsr(const std::string &am_model, const std::string &en_model, const std::string &de_model, const std::string &am_cmvn, const std::string &am_config, int thread_num){
  128. // online
  129. InitAsr(en_model, de_model, am_cmvn, am_config, thread_num);
  130. // offline
  131. try {
  132. m_session_ = std::make_unique<Ort::Session>(env_, am_model.c_str(), session_options_);
  133. LOG(INFO) << "Successfully load model from " << am_model;
  134. } catch (std::exception const &e) {
  135. LOG(ERROR) << "Error when load am onnx model: " << e.what();
  136. exit(0);
  137. }
  138. string strName;
  139. GetInputName(m_session_.get(), strName);
  140. m_strInputNames.push_back(strName.c_str());
  141. GetInputName(m_session_.get(), strName,1);
  142. m_strInputNames.push_back(strName);
  143. GetOutputName(m_session_.get(), strName);
  144. m_strOutputNames.push_back(strName);
  145. GetOutputName(m_session_.get(), strName,1);
  146. m_strOutputNames.push_back(strName);
  147. for (auto& item : m_strInputNames)
  148. m_szInputNames.push_back(item.c_str());
  149. for (auto& item : m_strOutputNames)
  150. m_szOutputNames.push_back(item.c_str());
  151. }
  152. void Paraformer::LoadOnlineConfigFromYaml(const char* filename){
  153. YAML::Node config;
  154. try{
  155. config = YAML::LoadFile(filename);
  156. }catch(exception const &e){
  157. LOG(ERROR) << "Error loading file, yaml file error or not exist.";
  158. exit(-1);
  159. }
  160. try{
  161. YAML::Node frontend_conf = config["frontend_conf"];
  162. YAML::Node encoder_conf = config["encoder_conf"];
  163. YAML::Node decoder_conf = config["decoder_conf"];
  164. YAML::Node predictor_conf = config["predictor_conf"];
  165. this->window_type = frontend_conf["window"].as<string>();
  166. this->n_mels = frontend_conf["n_mels"].as<int>();
  167. this->frame_length = frontend_conf["frame_length"].as<int>();
  168. this->frame_shift = frontend_conf["frame_shift"].as<int>();
  169. this->lfr_m = frontend_conf["lfr_m"].as<int>();
  170. this->lfr_n = frontend_conf["lfr_n"].as<int>();
  171. this->encoder_size = encoder_conf["output_size"].as<int>();
  172. this->fsmn_dims = encoder_conf["output_size"].as<int>();
  173. this->fsmn_layers = decoder_conf["num_blocks"].as<int>();
  174. this->fsmn_lorder = decoder_conf["kernel_size"].as<int>()-1;
  175. this->cif_threshold = predictor_conf["threshold"].as<double>();
  176. this->tail_alphas = predictor_conf["tail_threshold"].as<double>();
  177. }catch(exception const &e){
  178. LOG(ERROR) << "Error when load argument from vad config YAML.";
  179. exit(-1);
  180. }
  181. }
  182. void Paraformer::InitHwCompiler(const std::string &hw_model, int thread_num) {
  183. hw_session_options.SetIntraOpNumThreads(thread_num);
  184. hw_session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
  185. // DisableCpuMemArena can improve performance
  186. hw_session_options.DisableCpuMemArena();
  187. try {
  188. hw_m_session = std::make_unique<Ort::Session>(hw_env_, hw_model.c_str(), hw_session_options);
  189. LOG(INFO) << "Successfully load model from " << hw_model;
  190. } catch (std::exception const &e) {
  191. LOG(ERROR) << "Error when load hw compiler onnx model: " << e.what();
  192. exit(0);
  193. }
  194. string strName;
  195. GetInputName(hw_m_session.get(), strName);
  196. hw_m_strInputNames.push_back(strName.c_str());
  197. //GetInputName(hw_m_session.get(), strName,1);
  198. //hw_m_strInputNames.push_back(strName);
  199. GetOutputName(hw_m_session.get(), strName);
  200. hw_m_strOutputNames.push_back(strName);
  201. for (auto& item : hw_m_strInputNames)
  202. hw_m_szInputNames.push_back(item.c_str());
  203. for (auto& item : hw_m_strOutputNames)
  204. hw_m_szOutputNames.push_back(item.c_str());
  205. // if init hotword compiler is called, this is a hotword paraformer model
  206. use_hotword = true;
  207. }
// Load the word->token segmentation dictionary used to tokenize English
// hotwords in CompileHotwordEmbedding().
// NOTE(review): an existing seg_dict is not freed here — calling this twice
// would leak the previous instance; confirm single-call usage.
void Paraformer::InitSegDict(const std::string &seg_dict_model) {
seg_dict = new SegDict(seg_dict_model.c_str());
}
  211. Paraformer::~Paraformer()
  212. {
  213. if(vocab)
  214. delete vocab;
  215. if(seg_dict)
  216. delete seg_dict;
  217. }
// Intentionally a no-op: the offline Paraformer keeps no per-utterance
// streaming state that would need clearing between utterances.
void Paraformer::Reset()
{
}
  221. vector<float> Paraformer::FbankKaldi(float sample_rate, const float* waves, int len) {
  222. knf::OnlineFbank fbank_(fbank_opts_);
  223. std::vector<float> buf(len);
  224. for (int32_t i = 0; i != len; ++i) {
  225. buf[i] = waves[i] * 32768;
  226. }
  227. fbank_.AcceptWaveform(sample_rate, buf.data(), buf.size());
  228. //fbank_->InputFinished();
  229. int32_t frames = fbank_.NumFramesReady();
  230. int32_t feature_dim = fbank_opts_.mel_opts.num_bins;
  231. vector<float> features(frames * feature_dim);
  232. float *p = features.data();
  233. //std::cout << "samples " << len << std::endl;
  234. //std::cout << "fbank frames " << frames << std::endl;
  235. //std::cout << "fbank dim " << feature_dim << std::endl;
  236. //std::cout << "feature size " << features.size() << std::endl;
  237. for (int32_t i = 0; i != frames; ++i) {
  238. const float *f = fbank_.GetFrame(i);
  239. std::copy(f, f + feature_dim, p);
  240. p += feature_dim;
  241. }
  242. return features;
  243. }
  244. void Paraformer::LoadCmvn(const char *filename)
  245. {
  246. ifstream cmvn_stream(filename);
  247. if (!cmvn_stream.is_open()) {
  248. LOG(ERROR) << "Failed to open file: " << filename;
  249. exit(0);
  250. }
  251. string line;
  252. while (getline(cmvn_stream, line)) {
  253. istringstream iss(line);
  254. vector<string> line_item{istream_iterator<string>{iss}, istream_iterator<string>{}};
  255. if (line_item[0] == "<AddShift>") {
  256. getline(cmvn_stream, line);
  257. istringstream means_lines_stream(line);
  258. vector<string> means_lines{istream_iterator<string>{means_lines_stream}, istream_iterator<string>{}};
  259. if (means_lines[0] == "<LearnRateCoef>") {
  260. for (int j = 3; j < means_lines.size() - 1; j++) {
  261. means_list_.push_back(stof(means_lines[j]));
  262. }
  263. continue;
  264. }
  265. }
  266. else if (line_item[0] == "<Rescale>") {
  267. getline(cmvn_stream, line);
  268. istringstream vars_lines_stream(line);
  269. vector<string> vars_lines{istream_iterator<string>{vars_lines_stream}, istream_iterator<string>{}};
  270. if (vars_lines[0] == "<LearnRateCoef>") {
  271. for (int j = 3; j < vars_lines.size() - 1; j++) {
  272. vars_list_.push_back(stof(vars_lines[j])*scale);
  273. }
  274. continue;
  275. }
  276. }
  277. }
  278. }
  279. string Paraformer::GreedySearch(float * in, int n_len, int64_t token_nums, bool is_stamp, std::vector<float> us_alphas, std::vector<float> us_cif_peak)
  280. {
  281. vector<int> hyps;
  282. int Tmax = n_len;
  283. for (int i = 0; i < Tmax; i++) {
  284. int max_idx;
  285. float max_val;
  286. FindMax(in + i * token_nums, token_nums, max_val, max_idx);
  287. hyps.push_back(max_idx);
  288. }
  289. if(!is_stamp){
  290. return vocab->Vector2StringV2(hyps);
  291. }else{
  292. std::vector<string> char_list;
  293. std::vector<std::vector<float>> timestamp_list;
  294. std::string res_str;
  295. vocab->Vector2String(hyps, char_list);
  296. std::vector<string> raw_char(char_list);
  297. TimestampOnnx(us_alphas, us_cif_peak, char_list, res_str, timestamp_list);
  298. return PostProcess(raw_char, timestamp_list);
  299. }
  300. }
// Merge raw model tokens into display words and pair each word with a
// [begin, end] timestamp.
//   raw_char:       tokens from greedy search (may contain "@@" sub-words)
//   timestamp_list: per-token [begin, end] pairs, parallel to raw_char
// Returns "<joined words> | <comma-separated begin, end pairs>".
string Paraformer::PostProcess(std::vector<string> &raw_char, std::vector<std::vector<float>> &timestamp_list){
    std::vector<std::vector<float>> timestamp_merge;
    int i;
    list<string> words;
    int is_pre_english = false;   // was the previously emitted word English?
    int pre_english_len = 0;      // length of that previous English word
    int is_combining = false;     // currently accumulating "@@" sub-words?
    string combine = "";          // sub-word accumulator
    float begin=-1;               // start time of the word being accumulated
    for (i=0; i<raw_char.size(); i++){
        string word = raw_char[i];
        // step1 space character skips
        if (word == "<s>" || word == "</s>" || word == "<unk>")
            continue;
        // step2 combie phoneme to full word
        {
            int sub_word = !(word.find("@@") == string::npos);
            // process word start and middle part
            // NOTE(review): erase(length()-2) assumes "@@" is a suffix; a
            // mid-token "@@" would be trimmed incorrectly — confirm tokenizer.
            if (sub_word) {
                combine += word.erase(word.length() - 2);
                if(!is_combining){
                    begin = timestamp_list[i][0];
                }
                is_combining = true;
                continue;
            }
            // process word end part
            else if (is_combining) {
                combine += word;
                is_combining = false;
                word = combine;
                combine = "";
            }
        }
        // step3 process english word deal with space , turn abbreviation to upper case
        {
            // input word is chinese, not need process
            if (vocab->IsChinese(word)) {
                words.push_back(word);
                timestamp_merge.emplace_back(timestamp_list[i]);
                is_pre_english = false;
            }
            // input word is english word
            else {
                // pre word is chinese
                if (!is_pre_english) {
                    // word[0] = word[0] - 32;
                    words.push_back(word);
                    // use the sub-word start time if one was accumulated
                    begin = (begin==-1)?timestamp_list[i][0]:begin;
                    std::vector<float> vec = {begin, timestamp_list[i][1]};
                    timestamp_merge.emplace_back(vec);
                    begin = -1;
                    pre_english_len = word.size();
                }
                // pre word is english word
                else {
                    // single letter turn to upper case
                    // if (word.size() == 1) {
                    //     word[0] = word[0] - 32;
                    // }
                    // NOTE(review): both branches below are currently
                    // identical (a space is inserted either way); kept as-is.
                    if (pre_english_len > 1) {
                        words.push_back(" ");
                        words.push_back(word);
                        begin = (begin==-1)?timestamp_list[i][0]:begin;
                        std::vector<float> vec = {begin, timestamp_list[i][1]};
                        timestamp_merge.emplace_back(vec);
                        begin = -1;
                        pre_english_len = word.size();
                    }
                    else {
                        // if (word.size() > 1) {
                        //     words.push_back(" ");
                        // }
                        words.push_back(" ");
                        words.push_back(word);
                        begin = (begin==-1)?timestamp_list[i][0]:begin;
                        std::vector<float> vec = {begin, timestamp_list[i][1]};
                        timestamp_merge.emplace_back(vec);
                        begin = -1;
                        pre_english_len = word.size();
                    }
                }
                is_pre_english = true;
            }
        }
    }
    // join the merged timestamps as "b1, e1,b2, e2,..."
    string stamp_str="";
    for (i=0; i<timestamp_merge.size(); i++) {
        stamp_str += std::to_string(timestamp_merge[i][0]);
        stamp_str += ", ";
        stamp_str += std::to_string(timestamp_merge[i][1]);
        if(i!=timestamp_merge.size()-1){
            stamp_str += ",";
        }
    }
    stringstream ss;
    for (auto it = words.begin(); it != words.end(); it++) {
        ss << *it;
    }
    return ss.str()+" | "+stamp_str;
}
  402. void Paraformer::TimestampOnnx(std::vector<float>& us_alphas,
  403. std::vector<float> us_cif_peak,
  404. std::vector<string>& char_list,
  405. std::string &res_str,
  406. std::vector<std::vector<float>> &timestamp_vec,
  407. float begin_time,
  408. float total_offset){
  409. if (char_list.empty()) {
  410. return ;
  411. }
  412. const float START_END_THRESHOLD = 5.0;
  413. const float MAX_TOKEN_DURATION = 30.0;
  414. const float TIME_RATE = 10.0 * 6 / 1000 / 3;
  415. // 3 times upsampled, cif_peak is flattened into a 1D array
  416. std::vector<float> cif_peak = us_cif_peak;
  417. int num_frames = cif_peak.size();
  418. if (char_list.back() == "</s>") {
  419. char_list.pop_back();
  420. }
  421. vector<vector<float>> timestamp_list;
  422. vector<string> new_char_list;
  423. vector<float> fire_place;
  424. // for bicif model trained with large data, cif2 actually fires when a character starts
  425. // so treat the frames between two peaks as the duration of the former token
  426. for (int i = 0; i < num_frames; i++) {
  427. if (cif_peak[i] > 1.0 - 1e-4) {
  428. fire_place.push_back(i + total_offset);
  429. }
  430. }
  431. int num_peak = fire_place.size();
  432. if(num_peak != (int)char_list.size() + 1){
  433. float sum = std::accumulate(us_alphas.begin(), us_alphas.end(), 0.0f);
  434. float scale = sum/((int)char_list.size() + 1);
  435. cif_peak.clear();
  436. sum = 0.0;
  437. for(auto &alpha:us_alphas){
  438. alpha = alpha/scale;
  439. sum += alpha;
  440. cif_peak.emplace_back(sum);
  441. if(sum>=1.0 - 1e-4){
  442. sum -=(1.0 - 1e-4);
  443. }
  444. }
  445. fire_place.clear();
  446. for (int i = 0; i < num_frames; i++) {
  447. if (cif_peak[i] > 1.0 - 1e-4) {
  448. fire_place.push_back(i + total_offset);
  449. }
  450. }
  451. }
  452. // begin silence
  453. if (fire_place[0] > START_END_THRESHOLD) {
  454. new_char_list.push_back("<sil>");
  455. timestamp_list.push_back({0.0, fire_place[0] * TIME_RATE});
  456. }
  457. // tokens timestamp
  458. for (int i = 0; i < num_peak - 1; i++) {
  459. new_char_list.push_back(char_list[i]);
  460. if (i == num_peak - 2 || MAX_TOKEN_DURATION < 0 || fire_place[i + 1] - fire_place[i] < MAX_TOKEN_DURATION) {
  461. timestamp_list.push_back({fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE});
  462. } else {
  463. // cut the duration to token and sil of the 0-weight frames last long
  464. float _split = fire_place[i] + MAX_TOKEN_DURATION;
  465. timestamp_list.push_back({fire_place[i] * TIME_RATE, _split * TIME_RATE});
  466. timestamp_list.push_back({_split * TIME_RATE, fire_place[i + 1] * TIME_RATE});
  467. new_char_list.push_back("<sil>");
  468. }
  469. }
  470. // tail token and end silence
  471. if (num_frames - fire_place.back() > START_END_THRESHOLD) {
  472. float _end = (num_frames + fire_place.back()) / 2.0;
  473. timestamp_list.back()[1] = _end * TIME_RATE;
  474. timestamp_list.push_back({_end * TIME_RATE, num_frames * TIME_RATE});
  475. new_char_list.push_back("<sil>");
  476. } else {
  477. timestamp_list.back()[1] = num_frames * TIME_RATE;
  478. }
  479. if (begin_time) { // add offset time in model with vad
  480. for (auto& timestamp : timestamp_list) {
  481. timestamp[0] += begin_time / 1000.0;
  482. timestamp[1] += begin_time / 1000.0;
  483. }
  484. }
  485. assert(new_char_list.size() == timestamp_list.size());
  486. for (int i = 0; i < (int)new_char_list.size(); i++) {
  487. res_str += new_char_list[i] + " " + to_string(timestamp_list[i][0]) + " " + to_string(timestamp_list[i][1]) + ";";
  488. }
  489. for (int i = 0; i < (int)new_char_list.size(); i++) {
  490. if(new_char_list[i] != "<sil>"){
  491. timestamp_vec.push_back(timestamp_list[i]);
  492. }
  493. }
  494. }
  495. vector<float> Paraformer::ApplyLfr(const std::vector<float> &in)
  496. {
  497. int32_t in_feat_dim = fbank_opts_.mel_opts.num_bins;
  498. int32_t in_num_frames = in.size() / in_feat_dim;
  499. int32_t out_num_frames =
  500. (in_num_frames - lfr_m) / lfr_n + 1;
  501. int32_t out_feat_dim = in_feat_dim * lfr_m;
  502. std::vector<float> out(out_num_frames * out_feat_dim);
  503. const float *p_in = in.data();
  504. float *p_out = out.data();
  505. for (int32_t i = 0; i != out_num_frames; ++i) {
  506. std::copy(p_in, p_in + out_feat_dim, p_out);
  507. p_out += out_feat_dim;
  508. p_in += lfr_n * in_feat_dim;
  509. }
  510. return out;
  511. }
  512. void Paraformer::ApplyCmvn(std::vector<float> *v)
  513. {
  514. int32_t dim = means_list_.size();
  515. int32_t num_frames = v->size() / dim;
  516. float *p = v->data();
  517. for (int32_t i = 0; i != num_frames; ++i) {
  518. for (int32_t k = 0; k != dim; ++k) {
  519. p[k] = (p[k] + means_list_[k]) * vars_list_[k];
  520. }
  521. p += dim;
  522. }
  523. }
// Run offline recognition on a raw audio chunk.
//   din/len:        float samples (expected in [-1, 1]) at MODEL_SAMPLE_RATE
//   input_finished: unused in this offline path
//   hw_emb:         hotword embeddings; must be non-empty when use_hotword
// Returns the decoded text, or "" on error.
string Paraformer::Forward(float* din, int len, bool input_finished, const std::vector<std::vector<float>> &hw_emb)
{
    // feature pipeline: fbank -> LFR stacking -> CMVN
    int32_t in_feat_dim = fbank_opts_.mel_opts.num_bins;
    std::vector<float> wav_feats = FbankKaldi(MODEL_SAMPLE_RATE, din, len);
    wav_feats = ApplyLfr(wav_feats);
    ApplyCmvn(&wav_feats);
    int32_t feat_dim = lfr_m*in_feat_dim;
    int32_t num_frames = wav_feats.size() / feat_dim;
    //std::cout << "feat in: " << num_frames << " " << feat_dim << std::endl;
#ifdef _WIN_X86
    Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
#else
    Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
#endif
    // input 0: feats tensor of shape [1, T, D] (views wav_feats, no copy)
    const int64_t input_shape_[3] = {1, num_frames, feat_dim};
    Ort::Value onnx_feats = Ort::Value::CreateTensor<float>(m_memoryInfo,
        wav_feats.data(),
        wav_feats.size(),
        input_shape_,
        3);
    // input 1: feats_len, shape [1]
    const int64_t paraformer_length_shape[1] = {1};
    std::vector<int32_t> paraformer_length;
    paraformer_length.emplace_back(num_frames);
    Ort::Value onnx_feats_len = Ort::Value::CreateTensor<int32_t>(
        m_memoryInfo, paraformer_length.data(), paraformer_length.size(), paraformer_length_shape, 1);
    std::vector<Ort::Value> input_onnx;
    input_onnx.emplace_back(std::move(onnx_feats));
    input_onnx.emplace_back(std::move(onnx_feats_len));
    // input 2 (hotword models only): flattened embedding, shape
    // [1, num_hotwords, dim]. `embedding` must stay alive until Run()
    // because the tensor only views this buffer.
    std::vector<float> embedding;
    try{
        if (use_hotword) {
            if(hw_emb.size()<=0){
                LOG(ERROR) << "hw_emb is null";
                return "";
            }
            //PrintMat(hw_emb, "input_clas_emb");
            const int64_t hotword_shape[3] = {1, hw_emb.size(), hw_emb[0].size()};
            embedding.reserve(hw_emb.size() * hw_emb[0].size());
            for (auto item : hw_emb) {
                embedding.insert(embedding.end(), item.begin(), item.end());
            }
            //LOG(INFO) << "hotword shape " << hotword_shape[0] << " " << hotword_shape[1] << " " << hotword_shape[2] << " size " << embedding.size();
            Ort::Value onnx_hw_emb = Ort::Value::CreateTensor<float>(
                m_memoryInfo, embedding.data(), embedding.size(), hotword_shape, 3);
            input_onnx.emplace_back(std::move(onnx_hw_emb));
        }
    }catch (std::exception const &e)
    {
        LOG(ERROR)<<e.what();
        return "";
    }
    string result="";
    try {
        auto outputTensor = m_session_->Run(Ort::RunOptions{nullptr}, m_szInputNames.data(), input_onnx.data(), input_onnx.size(), m_szOutputNames.data(), m_szOutputNames.size());
        std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
        //LOG(INFO) << "paraformer out shape " << outputShape[0] << " " << outputShape[1] << " " << outputShape[2];
        int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
        float* floatData = outputTensor[0].GetTensorMutableData<float>();
        auto encoder_out_lens = outputTensor[1].GetTensorMutableData<int64_t>();
        // timestamp
        // four outputs => timestamp-capable model: outputs 2/3 are the
        // upsampled CIF alphas and peaks used for word timing
        if(outputTensor.size() == 4){
            std::vector<int64_t> us_alphas_shape = outputTensor[2].GetTensorTypeAndShapeInfo().GetShape();
            float* us_alphas_data = outputTensor[2].GetTensorMutableData<float>();
            std::vector<float> us_alphas(us_alphas_shape[1]);
            for (int i = 0; i < us_alphas_shape[1]; i++) {
                us_alphas[i] = us_alphas_data[i];
            }
            std::vector<int64_t> us_peaks_shape = outputTensor[3].GetTensorTypeAndShapeInfo().GetShape();
            float* us_peaks_data = outputTensor[3].GetTensorMutableData<float>();
            std::vector<float> us_peaks(us_peaks_shape[1]);
            for (int i = 0; i < us_peaks_shape[1]; i++) {
                us_peaks[i] = us_peaks_data[i];
            }
            result = GreedySearch(floatData, *encoder_out_lens, outputShape[2], true, us_alphas, us_peaks);
        }else{
            result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
        }
        // int pos = 0;
        // std::vector<std::vector<float>> logits;
        // for (int j = 0; j < outputShape[1]; j++)
        // {
        //     std::vector<float> vec_token;
        //     vec_token.insert(vec_token.begin(), floatData + pos, floatData + pos + outputShape[2]);
        //     logits.push_back(vec_token);
        //     pos += outputShape[2];
        // }
        // //PrintMat(logits, "logits_out");
        // result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
    }
    catch (std::exception const &e)
    {
        LOG(ERROR)<<e.what();
    }
    return result;
}
  619. std::vector<std::vector<float>> Paraformer::CompileHotwordEmbedding(std::string &hotwords) {
  620. int embedding_dim = encoder_size;
  621. std::vector<std::vector<float>> hw_emb;
  622. if (!use_hotword) {
  623. std::vector<float> vec(embedding_dim, 0);
  624. hw_emb.push_back(vec);
  625. return hw_emb;
  626. }
  627. int max_hotword_len = 10;
  628. std::vector<int32_t> hotword_matrix;
  629. std::vector<int32_t> lengths;
  630. int hotword_size = 1;
  631. int real_hw_size = 0;
  632. if (!hotwords.empty()) {
  633. std::vector<std::string> hotword_array = split(hotwords, ' ');
  634. hotword_size = hotword_array.size() + 1;
  635. hotword_matrix.reserve(hotword_size * max_hotword_len);
  636. for (auto hotword : hotword_array) {
  637. std::vector<std::string> chars;
  638. if (EncodeConverter::IsAllChineseCharactor((const U8CHAR_T*)hotword.c_str(), hotword.size())) {
  639. KeepChineseCharacterAndSplit(hotword, chars);
  640. } else {
  641. // for english
  642. std::vector<std::string> words = split(hotword, ' ');
  643. for (auto word : words) {
  644. std::vector<string> tokens = seg_dict->GetTokensByWord(word);
  645. chars.insert(chars.end(), tokens.begin(), tokens.end());
  646. }
  647. }
  648. if(chars.size()==0){
  649. continue;
  650. }
  651. std::vector<int32_t> hw_vector(max_hotword_len, 0);
  652. int vector_len = std::min(max_hotword_len, (int)chars.size());
  653. for (int i=0; i<chars.size(); i++) {
  654. std::cout << chars[i] << " ";
  655. hw_vector[i] = vocab->GetIdByToken(chars[i]);
  656. }
  657. std::cout << std::endl;
  658. lengths.push_back(vector_len);
  659. real_hw_size += 1;
  660. hotword_matrix.insert(hotword_matrix.end(), hw_vector.begin(), hw_vector.end());
  661. }
  662. hotword_size = real_hw_size + 1;
  663. }
  664. std::vector<int32_t> blank_vec(max_hotword_len, 0);
  665. blank_vec[0] = 1;
  666. hotword_matrix.insert(hotword_matrix.end(), blank_vec.begin(), blank_vec.end());
  667. lengths.push_back(1);
  668. #ifdef _WIN_X86
  669. Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
  670. #else
  671. Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  672. #endif
  673. const int64_t input_shape_[2] = {hotword_size, max_hotword_len};
  674. Ort::Value onnx_hotword = Ort::Value::CreateTensor<int32_t>(m_memoryInfo,
  675. (int32_t*)hotword_matrix.data(),
  676. hotword_size * max_hotword_len,
  677. input_shape_,
  678. 2);
  679. LOG(INFO) << "clas shape " << hotword_size << " " << max_hotword_len << std::endl;
  680. std::vector<Ort::Value> input_onnx;
  681. input_onnx.emplace_back(std::move(onnx_hotword));
  682. std::vector<std::vector<float>> result;
  683. try {
  684. auto outputTensor = hw_m_session->Run(Ort::RunOptions{nullptr}, hw_m_szInputNames.data(), input_onnx.data(), input_onnx.size(), hw_m_szOutputNames.data(), hw_m_szOutputNames.size());
  685. std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
  686. int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
  687. float* floatData = outputTensor[0].GetTensorMutableData<float>(); // shape [max_hotword_len, hotword_size, dim]
  688. // get embedding by real hotword length
  689. assert(outputShape[0] == max_hotword_len);
  690. assert(outputShape[1] == hotword_size);
  691. embedding_dim = outputShape[2];
  692. for (int j = 0; j < hotword_size; j++)
  693. {
  694. int start_pos = hotword_size * (lengths[j] - 1) * embedding_dim + j * embedding_dim;
  695. std::vector<float> embedding;
  696. embedding.insert(embedding.begin(), floatData + start_pos, floatData + start_pos + embedding_dim);
  697. result.push_back(embedding);
  698. }
  699. }
  700. catch (std::exception const &e)
  701. {
  702. LOG(ERROR)<<e.what();
  703. }
  704. //PrintMat(result, "clas_embedding_output");
  705. return result;
  706. }
// Second-pass rescoring is not implemented for the ONNX Paraformer backend;
// logs an error and returns an empty string.
string Paraformer::Rescoring()
{
    LOG(ERROR)<<"Not Imp!!!!!!";
    return "";
}
  712. } // namespace funasr