Fangjun Kuang 2 years ago
Parent
Commit
db5c5ac194
5 changed files with 119 additions and 3 deletions
  1. CMakeLists.txt (+3, -2)
  2. README.md (+4, -1)
  3. cmake/ncnn.cmake (+45, -0)
  4. sherpa-ncnn/csrc/CMakeLists.txt (+5, -0)
  5. sherpa-ncnn/csrc/sherpa-ncnn.cc (+62, -0)

CMakeLists.txt (+3, -2)

@@ -20,7 +20,7 @@ endif()
 set(CMAKE_INSTALL_RPATH ${SHERPA_NCNN_RPATH_ORIGIN})
 set(CMAKE_BUILD_RPATH ${SHERPA_NCNN_RPATH_ORIGIN})
 
-set(BUILD_SHARED_LIBS ON)
+set(BUILD_SHARED_LIBS OFF)
 if(WIN32)
   message(STATUS "Set BUILD_SHARED_LIBS to OFF for Windows")
   set(BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE)
@@ -38,6 +38,7 @@ set(CMAKE_CXX_EXTENSIONS OFF)
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules)
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake)
 
-include(cmake/kaldi-native-fbank.cmake)
+include(kaldi-native-fbank)
+include(ncnn)
 
 add_subdirectory(sherpa-ncnn)

README.md (+4, -1)

@@ -5,6 +5,7 @@ See <https://github.com/k2-fsa/sherpa>
 This repo uses [ncnn](https://github.com/tencent/ncnn) for running the neural
 network model and does not depend on libtorch.
 
+
 # Usage
 
 ```bash
@@ -15,5 +16,7 @@ cd build
 cmake ..
 make -j
 
-./bin/online-fbank-test
+./bin/sherpa-ncnn
 ```
+
+[ncnn]: https://github.com/tencent/ncnn

cmake/ncnn.cmake (+45, -0)

@@ -0,0 +1,45 @@
+function(download_ncnn)
+  if(CMAKE_VERSION VERSION_LESS 3.11)
+    # FetchContent is available since 3.11,
+    # we've copied it to ${CMAKE_SOURCE_DIR}/cmake/Modules
+    # so that it can be used in lower CMake versions.
+    message(STATUS "Use FetchContent provided by sherpa-ncnn")
+    list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules)
+  endif()
+
+  include(FetchContent)
+
+  set(ncnn_URL  "https://github.com/csukuangfj/ncnn/archive/refs/tags/sherpa-0.1.tar.gz")
+  set(ncnn_HASH "SHA256=bd9669798846a2727eaa05c4ce156d19e8c729a0f6ee9277d5c4ded33fd38dff")
+
+  FetchContent_Declare(ncnn
+    URL               ${ncnn_URL}
+    URL_HASH          ${ncnn_HASH}
+  )
+
+  set(NCNN_INSTALL_SDK OFF CACHE BOOL "" FORCE)
+  set(NCNN_PIXEL OFF CACHE BOOL "" FORCE)
+  set(NCNN_PIXEL_ROTATE OFF CACHE BOOL "" FORCE)
+  set(NCNN_PIXEL_AFFINE OFF CACHE BOOL "" FORCE)
+  set(NCNN_PIXEL_DRAWING OFF CACHE BOOL "" FORCE)
+  set(NCNN_BUILD_BENCHMARK OFF CACHE BOOL "" FORCE)
+
+  set(NCNN_INT8 OFF CACHE BOOL "" FORCE) # TODO(fangjun): enable it
+  set(NCNN_BF16 OFF CACHE BOOL "" FORCE) # TODO(fangjun): enable it
+
+  set(NCNN_BUILD_TOOLS OFF CACHE BOOL "" FORCE)
+  set(NCNN_BUILD_EXAMPLES OFF CACHE BOOL "" FORCE)
+  set(NCNN_BUILD_TESTS OFF CACHE BOOL "" FORCE)
+
+  FetchContent_GetProperties(ncnn)
+  if(NOT ncnn_POPULATED)
+    message(STATUS "Downloading ncnn")
+    FetchContent_Populate(ncnn)
+  endif()
+  message(STATUS "ncnn is downloaded to ${ncnn_SOURCE_DIR}")
+  message(STATUS "ncnn's binary dir is ${ncnn_BINARY_DIR}")
+
+  add_subdirectory(${ncnn_SOURCE_DIR} ${ncnn_BINARY_DIR} EXCLUDE_FROM_ALL)
+endfunction()
+
+download_ncnn()

sherpa-ncnn/csrc/CMakeLists.txt (+5, -0)

@@ -1,2 +1,7 @@
 add_executable(online-fbank-test online-fbank-test.cc)
 target_link_libraries(online-fbank-test kaldi-native-fbank-core)
+
+add_executable(sherpa-ncnn
+  sherpa-ncnn.cc
+)
+target_link_libraries(sherpa-ncnn ncnn)

sherpa-ncnn/csrc/sherpa-ncnn.cc (+62, -0)

@@ -0,0 +1,62 @@
+/**
+ * Copyright (c)  2022  Xiaomi Corporation (authors: Fangjun Kuang)
+ *
+ * See LICENSE for clarification regarding multiple authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "net.h"
+#include <iostream>
+
+static void InitNet(ncnn::Net &net, const std::string &param,
+                    const std::string &model) {
+  if (net.load_param(param.c_str())) {
+    std::cerr << "failed to load " << param << "\n";
+    exit(-1);
+  }
+
+  if (net.load_model(model.c_str())) {
+    std::cerr << "failed to load " << model << "\n";
+    exit(-1);
+  }
+}
+
+int main() {
+
+  std::string encoder_param =
+      "bar/encoder_jit_trace-iter-468000-avg-16-pnnx.ncnn.param";
+
+  std::string encoder_model =
+      "bar/encoder_jit_trace-iter-468000-avg-16-pnnx.ncnn.bin";
+
+  std::string decoder_param =
+      "bar/decoder_jit_trace-iter-468000-avg-16-pnnx.ncnn.param";
+
+  std::string decoder_model =
+      "bar/decoder_jit_trace-iter-468000-avg-16-pnnx.ncnn.bin";
+
+  std::string joiner_param =
+      "bar/joiner_jit_trace-iter-468000-avg-16-pnnx.ncnn.param";
+
+  std::string joiner_model =
+      "bar/joiner_jit_trace-iter-468000-avg-16-pnnx.ncnn.bin";
+
+  ncnn::Net encoder_net;
+  ncnn::Net decoder_net;
+  ncnn::Net joiner_net;
+
+  InitNet(encoder_net, encoder_param, encoder_model);
+  InitNet(decoder_net, decoder_param, decoder_model);
+  InitNet(joiner_net, joiner_param, joiner_model);
+}
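
The new `sherpa-ncnn.cc` only loads the three networks and exits. As a rough sketch of where this is heading, the snippet below pushes a dummy feature chunk through the encoder with ncnn's `Extractor` API. The blob names `"in0"`/`"out0"`, the 80-dimensional features, and the 16-frame chunk size are assumptions about the pnnx-exported model, not anything this commit establishes.

```cpp
#include <iostream>

#include "net.h"  // ncnn::Net, ncnn::Extractor, ncnn::Mat

int main() {
  ncnn::Net encoder_net;
  if (encoder_net.load_param(
          "bar/encoder_jit_trace-iter-468000-avg-16-pnnx.ncnn.param") ||
      encoder_net.load_model(
          "bar/encoder_jit_trace-iter-468000-avg-16-pnnx.ncnn.bin")) {
    std::cerr << "failed to load the encoder\n";
    return -1;
  }

  // 16 frames of 80-dim fbank features, zero-filled just to run the graph.
  ncnn::Mat features(80, 16);
  features.fill(0.f);

  ncnn::Extractor ex = encoder_net.create_extractor();
  ex.input("in0", features);  // "in0" is an assumed input blob name

  ncnn::Mat encoder_out;
  ex.extract("out0", encoder_out);  // "out0" is an assumed output blob name

  std::cout << "encoder_out shape: " << encoder_out.w << " x "
            << encoder_out.h << "\n";
  return 0;
}
```

The decoder and joiner nets would presumably be driven the same way (one `create_extractor()` per forward pass) inside the greedy-search loop that later commits add.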