// sherpa-ncnn/csrc/lstm-model.cc
  1. /**
  2. * Copyright (c) 2022 Xiaomi Corporation (authors: Fangjun Kuang)
  3. *
  4. * See LICENSE for clarification regarding multiple authors
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the "License");
  7. * you may not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #include "sherpa-ncnn/csrc/lstm-model.h"
  19. #include <utility>
  20. #include <vector>
  21. namespace sherpa_ncnn {
  22. LstmModel::LstmModel(const ModelConfig &config)
  23. : num_threads_(config.num_threads) {
  24. InitEncoder(config.encoder_param, config.encoder_bin);
  25. InitDecoder(config.decoder_param, config.decoder_bin);
  26. InitJoiner(config.joiner_param, config.joiner_bin);
  27. InitEncoderInputOutputIndexes();
  28. InitDecoderInputOutputIndexes();
  29. InitJoinerInputOutputIndexes();
  30. }
  31. #if __ANDROID_API__ >= 9
  32. LstmModel::LstmModel(AAssetManager *mgr, const ModelConfig &config)
  33. : num_threads_(config.num_threads) {
  34. InitEncoder(mgr, config.encoder_param, config.encoder_bin);
  35. InitDecoder(mgr, config.decoder_param, config.decoder_bin);
  36. InitJoiner(mgr, config.joiner_param, config.joiner_bin);
  37. InitEncoderInputOutputIndexes();
  38. InitDecoderInputOutputIndexes();
  39. InitJoinerInputOutputIndexes();
  40. }
  41. #endif
  42. std::pair<ncnn::Mat, std::vector<ncnn::Mat>> LstmModel::RunEncoder(
  43. ncnn::Mat &features, const std::vector<ncnn::Mat> &states) {
  44. ncnn::Mat hx;
  45. ncnn::Mat cx;
  46. if (states.empty()) {
  47. auto s = GetEncoderInitStates();
  48. hx = s[0];
  49. cx = s[1];
  50. } else {
  51. hx = states[0];
  52. cx = states[1];
  53. }
  54. ncnn::Mat feature_length(1);
  55. feature_length[0] = features.h;
  56. ncnn::Extractor encoder_ex = encoder_.create_extractor();
  57. encoder_ex.set_num_threads(num_threads_);
  58. encoder_ex.input(encoder_input_indexes_[0], features);
  59. encoder_ex.input(encoder_input_indexes_[1], feature_length);
  60. encoder_ex.input(encoder_input_indexes_[2], hx);
  61. encoder_ex.input(encoder_input_indexes_[3], cx);
  62. ncnn::Mat encoder_out;
  63. encoder_ex.extract(encoder_output_indexes_[0], encoder_out);
  64. encoder_ex.extract(encoder_output_indexes_[1], hx);
  65. encoder_ex.extract(encoder_output_indexes_[2], cx);
  66. std::vector<ncnn::Mat> next_states = {hx, cx};
  67. return {encoder_out, next_states};
  68. }
  69. ncnn::Mat LstmModel::RunDecoder(ncnn::Mat &decoder_input) {
  70. ncnn::Extractor decoder_ex = decoder_.create_extractor();
  71. decoder_ex.set_num_threads(num_threads_);
  72. ncnn::Mat decoder_out;
  73. decoder_ex.input(decoder_input_indexes_[0], decoder_input);
  74. decoder_ex.extract(decoder_output_indexes_[0], decoder_out);
  75. decoder_out = decoder_out.reshape(decoder_out.w);
  76. return decoder_out;
  77. }
  78. ncnn::Mat LstmModel::RunJoiner(ncnn::Mat &encoder_out, ncnn::Mat &decoder_out) {
  79. auto joiner_ex = joiner_.create_extractor();
  80. joiner_ex.set_num_threads(num_threads_);
  81. joiner_ex.input(joiner_input_indexes_[0], encoder_out);
  82. joiner_ex.input(joiner_input_indexes_[1], decoder_out);
  83. ncnn::Mat joiner_out;
  84. joiner_ex.extract(joiner_output_indexes_[0], joiner_out);
  85. return joiner_out;
  86. }
  87. void LstmModel::InitEncoder(const std::string &encoder_param,
  88. const std::string &encoder_bin) {
  89. encoder_.opt.use_packing_layout = false;
  90. encoder_.opt.use_fp16_storage = false;
  91. InitNet(encoder_, encoder_param, encoder_bin);
  92. }
  93. void LstmModel::InitDecoder(const std::string &decoder_param,
  94. const std::string &decoder_bin) {
  95. InitNet(decoder_, decoder_param, decoder_bin);
  96. }
  97. void LstmModel::InitJoiner(const std::string &joiner_param,
  98. const std::string &joiner_bin) {
  99. InitNet(joiner_, joiner_param, joiner_bin);
  100. }
  101. #if __ANDROID_API__ >= 9
  102. void LstmModel::InitEncoder(AAssetManager *mgr,
  103. const std::string &encoder_param,
  104. const std::string &encoder_bin) {
  105. encoder_.opt.use_packing_layout = false;
  106. encoder_.opt.use_fp16_storage = false;
  107. InitNet(mgr, encoder_, encoder_param, encoder_bin);
  108. }
  109. void LstmModel::InitDecoder(AAssetManager *mgr,
  110. const std::string &decoder_param,
  111. const std::string &decoder_bin) {
  112. InitNet(mgr, decoder_, decoder_param, decoder_bin);
  113. }
  114. void LstmModel::InitJoiner(AAssetManager *mgr, const std::string &joiner_param,
  115. const std::string &joiner_bin) {
  116. InitNet(mgr, joiner_, joiner_param, joiner_bin);
  117. }
  118. #endif
  119. std::vector<ncnn::Mat> LstmModel::GetEncoderInitStates() const {
  120. int32_t num_encoder_layers = 12;
  121. int32_t d_model = 512;
  122. int32_t rnn_hidden_size = 1024;
  123. auto hx = ncnn::Mat(d_model, num_encoder_layers);
  124. auto cx = ncnn::Mat(rnn_hidden_size, num_encoder_layers);
  125. hx.fill(0);
  126. cx.fill(0);
  127. return {hx, cx};
  128. }
  129. void LstmModel::InitEncoderInputOutputIndexes() {
  130. // input indexes map
  131. // [0] -> in0, features,
  132. // [1] -> in1, features_length
  133. // [2] -> in2, hx
  134. // [3] -> in3, cx
  135. encoder_input_indexes_.resize(4);
  136. // output indexes map
  137. // [0] -> out0, encoder_out
  138. // [1] -> out2, hx
  139. // [2] -> out3, cx
  140. encoder_output_indexes_.resize(3);
  141. const auto &blobs = encoder_.blobs();
  142. for (int32_t i = 0; i != blobs.size(); ++i) {
  143. const auto &b = blobs[i];
  144. if (b.name == "in0") encoder_input_indexes_[0] = i;
  145. if (b.name == "in1") encoder_input_indexes_[1] = i;
  146. if (b.name == "in2") encoder_input_indexes_[2] = i;
  147. if (b.name == "in3") encoder_input_indexes_[3] = i;
  148. if (b.name == "out0") encoder_output_indexes_[0] = i;
  149. if (b.name == "out2") encoder_output_indexes_[1] = i;
  150. if (b.name == "out3") encoder_output_indexes_[2] = i;
  151. }
  152. }
  153. void LstmModel::InitDecoderInputOutputIndexes() {
  154. // input indexes map
  155. // [0] -> in0, decoder_input,
  156. decoder_input_indexes_.resize(1);
  157. // output indexes map
  158. // [0] -> out0, decoder_out,
  159. decoder_output_indexes_.resize(1);
  160. const auto &blobs = decoder_.blobs();
  161. for (int32_t i = 0; i != blobs.size(); ++i) {
  162. const auto &b = blobs[i];
  163. if (b.name == "in0") decoder_input_indexes_[0] = i;
  164. if (b.name == "out0") decoder_output_indexes_[0] = i;
  165. }
  166. }
  167. void LstmModel::InitJoinerInputOutputIndexes() {
  168. // input indexes map
  169. // [0] -> in0, encoder_input,
  170. // [1] -> in1, decoder_input,
  171. joiner_input_indexes_.resize(2);
  172. // output indexes map
  173. // [0] -> out0, joiner_out,
  174. joiner_output_indexes_.resize(1);
  175. const auto &blobs = joiner_.blobs();
  176. for (int32_t i = 0; i != blobs.size(); ++i) {
  177. const auto &b = blobs[i];
  178. if (b.name == "in0") joiner_input_indexes_[0] = i;
  179. if (b.name == "in1") joiner_input_indexes_[1] = i;
  180. if (b.name == "out0") joiner_output_indexes_[0] = i;
  181. }
  182. }
  183. } // namespace sherpa_ncnn