speech-recognition-from-microphone-with-endpoint-detection.py

#!/usr/bin/env python3
# Real-time speech recognition from a microphone with the sherpa-ncnn Python API,
# with endpoint detection.
#
# Please refer to
# https://k2-fsa.github.io/sherpa/ncnn/pretrained_models/index.html
# to download pre-trained models.
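#
# For example, the model referenced below
# (sherpa-ncnn-conv-emformer-transducer-2022-12-06) can be fetched from the
# links on that page and unpacked next to this script so that the relative
# paths in create_recognizer() resolve.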
import sys

try:
    import sounddevice as sd
except ImportError:
    print("Please install sounddevice first. You can use")
    print()
    print("  pip install sounddevice")
    print()
    print("to install it")
    sys.exit(-1)

import sherpa_ncnn


def create_recognizer():
    # Please replace the model files if needed.
    # See https://k2-fsa.github.io/sherpa/ncnn/pretrained_models/index.html
    # for download links.
    recognizer = sherpa_ncnn.Recognizer(
        tokens="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/tokens.txt",
        encoder_param="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/encoder_jit_trace-pnnx.ncnn.param",
        encoder_bin="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/encoder_jit_trace-pnnx.ncnn.bin",
        decoder_param="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/decoder_jit_trace-pnnx.ncnn.param",
        decoder_bin="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/decoder_jit_trace-pnnx.ncnn.bin",
        joiner_param="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/joiner_jit_trace-pnnx.ncnn.param",
        joiner_bin="./sherpa-ncnn-conv-emformer-transducer-2022-12-06/joiner_jit_trace-pnnx.ncnn.bin",
        num_threads=4,
        decoding_method="modified_beam_search",
        enable_endpoint_detection=True,
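        # Endpoint rules (values are in seconds). Roughly: rule1 fires after
        # 2.4 s of trailing silence even if nothing has been decoded, rule2
        # fires after 1.2 s of trailing silence once some text has been
        # decoded, and rule3 forces an endpoint when the utterance exceeds
        # 300 s.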
        rule1_min_trailing_silence=2.4,
        rule2_min_trailing_silence=1.2,
        rule3_min_utterance_length=300,
        hotwords_file="",
        hotwords_score=1.5,
    )
    return recognizer
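
# Notes on the configuration above (adjust to taste):
# - The paths assume the sherpa-ncnn-conv-emformer-transducer-2022-12-06
#   directory has been downloaded and unpacked next to this script; other
#   pre-trained sherpa-ncnn models work the same way with their own paths.
# - decoding_method can alternatively be set to "greedy_search".
# - hotwords_file, when non-empty, should point to a text file of hotword
#   phrases whose scores are boosted by hotwords_score during beam search;
#   leaving it empty disables hotword boosting.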


def main():
    print("Started! Please speak")
    recognizer = create_recognizer()
    sample_rate = recognizer.sample_rate
    samples_per_read = int(0.1 * sample_rate)  # 0.1 second = 100 ms
    last_result = ""
    segment_id = 0

    with sd.InputStream(channels=1, dtype="float32", samplerate=sample_rate) as s:
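        # The stream is mono float32, i.e. samples in [-1, 1], which is what
        # accept_waveform() expects; opening it at the model's sample rate
        # avoids any resampling.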
        while True:
            samples, _ = s.read(samples_per_read)  # a blocking read
            samples = samples.reshape(-1)
            recognizer.accept_waveform(sample_rate, samples)
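
            # recognizer.text is the partial result for the current segment;
            # recognizer.is_endpoint becomes True once one of the endpoint
            # rules configured above fires.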
            is_endpoint = recognizer.is_endpoint
            result = recognizer.text

            if result and (last_result != result):
                last_result = result
                print("\r{}:{}".format(segment_id, result), end="", flush=True)

            if is_endpoint:
                if result:
                    print("\r{}:{}".format(segment_id, result), flush=True)
                    segment_id += 1
                recognizer.reset()


if __name__ == "__main__":
    devices = sd.query_devices()
    print(devices)
    default_input_device_idx = sd.default.device[0]
    print(f'Use default device: {devices[default_input_device_idx]["name"]}')
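
    # To record from a different microphone, pass device=<index from the list
    # printed above> to sd.InputStream(...) in main(); the script otherwise
    # uses sounddevice's default input device shown here.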

    try:
        main()
    except KeyboardInterrupt:
        print("\nCaught Ctrl + C. Exiting")
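
# Usage sketch (assumes the model directory referenced in create_recognizer()
# has been downloaded and unpacked in the current working directory):
#
#   python3 ./speech-recognition-from-microphone-with-endpoint-detection.py
#
# Speak into the microphone; each detected endpoint finalizes the current line,
# and the next non-empty segment starts on a new line with an incremented id.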