// sherpa-ncnn-microphone.cc
/**
 * Copyright (c)  2022  Xiaomi Corporation (authors: Fangjun Kuang)
 *
 * See LICENSE for clarification regarding multiple authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

#include <string>
#include <tuple>
#include <vector>

#include "portaudio.h"  // NOLINT
#include "sherpa-ncnn/csrc/decode.h"
#include "sherpa-ncnn/csrc/features.h"
#include "sherpa-ncnn/csrc/microphone.h"
#include "sherpa-ncnn/csrc/model.h"
#include "sherpa-ncnn/csrc/symbol-table.h"

// Set to true by the SIGINT handler; checked by the audio callback and the
// main loop.
bool stop = false;
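// PortAudio invokes this callback on its own audio thread. The captured
// samples are forwarded to the feature extractor passed in via user_data.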
static int RecordCallback(const void *input_buffer, void * /*output_buffer*/,
                          unsigned long frames_per_buffer,  // NOLINT
                          const PaStreamCallbackTimeInfo * /*time_info*/,
                          PaStreamCallbackFlags /*status_flags*/,
                          void *user_data) {
  auto feature_extractor =
      reinterpret_cast<sherpa_ncnn::FeatureExtractor *>(user_data);

  feature_extractor->AcceptWaveform(
      16000, reinterpret_cast<const float *>(input_buffer), frames_per_buffer);

  return stop ? paComplete : paContinue;
}
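// Ctrl-C (SIGINT) handler: ask the audio callback and the main loop to stop.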
static void Handler(int sig) {
  stop = true;
  fprintf(stderr, "\nexiting...\n");
}
int main(int32_t argc, char *argv[]) {
  if (argc != 8 && argc != 9) {
    const char *usage = R"usage(
Usage:
  ./bin/sherpa-ncnn-microphone \
    /path/to/tokens.txt \
    /path/to/encoder.ncnn.param \
    /path/to/encoder.ncnn.bin \
    /path/to/decoder.ncnn.param \
    /path/to/decoder.ncnn.bin \
    /path/to/joiner.ncnn.param \
    /path/to/joiner.ncnn.bin \
    [num_threads]

You can download pre-trained models from the following repository:
https://huggingface.co/csukuangfj/sherpa-ncnn-2022-09-05
)usage";
    fprintf(stderr, "%s\n", usage);
    fprintf(stderr, "argc = %d\n", argc);

    return EXIT_FAILURE;
  }
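  // Install the Ctrl-C handler and collect the model files from the command
  // line. num_threads defaults to 4 unless given as the last argument.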
  signal(SIGINT, Handler);

  sherpa_ncnn::ModelConfig config;

  std::string tokens = argv[1];
  config.encoder_param = argv[2];
  config.encoder_bin = argv[3];
  config.decoder_param = argv[4];
  config.decoder_bin = argv[5];
  config.joiner_param = argv[6];
  config.joiner_bin = argv[7];

  config.num_threads = 4;
  if (argc == 9) {
    config.num_threads = atoi(argv[8]);
  }
  sherpa_ncnn::SymbolTable sym(tokens);
  fprintf(stderr, "%s\n", config.ToString().c_str());

  auto model = sherpa_ncnn::Model::Create(config);
  if (!model) {
    fprintf(stderr, "Failed to create a model\n");
    exit(EXIT_FAILURE);
  }
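  // 80-dim fbank features at 16 kHz, with dithering disabled, matching what
  // the pre-trained models listed above expect.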
  float sample_rate = 16000;

  sherpa_ncnn::Microphone mic;

  knf::FbankOptions fbank_opts;
  fbank_opts.frame_opts.dither = 0;
  fbank_opts.frame_opts.snip_edges = false;
  fbank_opts.frame_opts.samp_freq = sample_rate;
  fbank_opts.mel_opts.num_bins = 80;

  sherpa_ncnn::FeatureExtractor feature_extractor(fbank_opts);
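  // Capture mono float32 audio from the default input device via PortAudio.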
  PaDeviceIndex num_devices = Pa_GetDeviceCount();
  fprintf(stderr, "Num devices: %d\n", num_devices);

  PaStreamParameters param;

  param.device = Pa_GetDefaultInputDevice();
  if (param.device == paNoDevice) {
    fprintf(stderr, "No default input device found\n");
    exit(EXIT_FAILURE);
  }
  fprintf(stderr, "Use default device: %d\n", param.device);

  const PaDeviceInfo *info = Pa_GetDeviceInfo(param.device);
  fprintf(stderr, "  Name: %s\n", info->name);
  fprintf(stderr, "  Max input channels: %d\n", info->maxInputChannels);

  param.channelCount = 1;
  param.sampleFormat = paFloat32;
  param.suggestedLatency = info->defaultLowInputLatency;
  param.hostApiSpecificStreamInfo = nullptr;
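  // Open an input-only stream; RecordCallback receives each chunk of captured
  // samples.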
  PaStream *stream;
  PaError err =
      Pa_OpenStream(&stream, &param, nullptr, /* &outputParameters, */
                    sample_rate,
                    0,          // frames per buffer
                    paClipOff,  // we won't output out of range samples
                                // so don't bother clipping them
                    RecordCallback, &feature_extractor);
  if (err != paNoError) {
    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
    exit(EXIT_FAILURE);
  }
  err = Pa_StartStream(stream);
  if (err != paNoError) {
    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
    exit(EXIT_FAILURE);
  }
  fprintf(stderr, "Started\n");
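  // Prime the decoder with context_size blank tokens; the greedy search
  // updates decoder_out and hyp as new tokens are emitted.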
  int32_t segment = model->Segment();
  int32_t offset = model->Offset();

  int32_t context_size = model->ContextSize();
  int32_t blank_id = model->BlankId();

  std::vector<int32_t> hyp(context_size, blank_id);

  ncnn::Mat decoder_input(context_size);
  for (int32_t i = 0; i != context_size; ++i) {
    static_cast<int32_t *>(decoder_input)[i] = blank_id;
  }

  ncnn::Mat decoder_out = model->RunDecoder(decoder_input);

  ncnn::Mat hx;
  ncnn::Mat cx;

  int32_t num_tokens = hyp.size();
  int32_t num_processed = 0;

  std::vector<ncnn::Mat> states;
  ncnn::Mat encoder_out;
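  // Main loop: whenever a full feature segment is ready, run the encoder on
  // it, greedy-search the encoder output, and print any newly decoded tokens.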
  while (!stop) {
    while (feature_extractor.NumFramesReady() - num_processed >= segment) {
      ncnn::Mat features = feature_extractor.GetFrames(num_processed, segment);
      num_processed += offset;

      std::tie(encoder_out, states) = model->RunEncoder(features, states);
      GreedySearch(model.get(), encoder_out, &decoder_out, &hyp);
    }

    if (hyp.size() != num_tokens) {
      num_tokens = hyp.size();

      std::string text;
      for (int32_t i = context_size; i != hyp.size(); ++i) {
        text += sym[hyp[i]];
      }
      fprintf(stderr, "%s\n", text.c_str());
    }

    Pa_Sleep(20);  // sleep for 20ms
  }
  err = Pa_CloseStream(stream);
  if (err != paNoError) {
    fprintf(stderr, "portaudio error: %s\n", Pa_GetErrorText(err));
    exit(EXIT_FAILURE);
  }

  return 0;
}