From f2456f8d936f7de3408506664e1f6b2bc389b55c Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 25 Sep 2022 22:35:26 +0300
Subject: [PATCH] Create README.md

---
 Makefile  |  14 ++++---
 README.md | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 125 insertions(+), 5 deletions(-)
 create mode 100644 README.md

diff --git a/Makefile b/Makefile
index 50b68e1..1203e82 100644
--- a/Makefile
+++ b/Makefile
@@ -19,7 +19,7 @@ run: main
 .PHONY: samples
 samples:
 	@echo "Downloading samples..."
-	mkdir -p samples
+	@mkdir -p samples
 	@wget --quiet --show-progress -O samples/gb0.ogg https://upload.wikimedia.org/wikipedia/commons/2/22/George_W._Bush%27s_weekly_radio_address_%28November_1%2C_2008%29.oga
 	@wget --quiet --show-progress -O samples/gb1.ogg https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg
 	@wget --quiet --show-progress -O samples/hp0.ogg https://upload.wikimedia.org/wikipedia/en/d/d4/En.henryfphillips.ogg
@@ -31,10 +31,11 @@ samples:
 .PHONY: tiny.en
 tiny.en: main
 	@echo "Downloading tiny.en (75 MB just once)"
-	mkdir -p models
+	@mkdir -p models
 	@if [ ! -f models/ggml-tiny.en.bin ]; then \
 		wget --quiet --show-progress -O models/ggml-tiny.en.bin https://ggml.ggerganov.com/ggml-model-whisper-tiny.en.bin ; \
 	fi
+	@echo ""
 	@echo "==============================================="
 	@echo "Running tiny.en on all samples in ./samples ..."
 	@echo "==============================================="
@@ -51,10 +52,11 @@ tiny.en: main
 .PHONY: base.en
 base.en: main
 	@echo "Downloading base.en (142 MB just once)"
-	mkdir -p models
+	@mkdir -p models
 	@if [ ! -f models/ggml-base.en.bin ]; then \
 		wget --quiet --show-progress -O models/ggml-base.en.bin https://ggml.ggerganov.com/ggml-model-whisper-base.en.bin ; \
 	fi
+	@echo ""
 	@echo "==============================================="
 	@echo "Running base.en on all samples in ./samples ..."
 	@echo "==============================================="
@@ -71,10 +73,11 @@ base.en: main
 .PHONY: small.en
 small.en: main
 	@echo "Downloading small.en (466 MB just once)"
-	mkdir -p models
+	@mkdir -p models
 	@if [ ! -f models/ggml-small.en.bin ]; then \
 		wget --quiet --show-progress -O models/ggml-small.en.bin https://ggml.ggerganov.com/ggml-model-whisper-small.en.bin ; \
 	fi
+	@echo ""
 	@echo "==============================================="
 	@echo "Running small.en on all samples in ./samples ..."
 	@echo "==============================================="
@@ -91,10 +94,11 @@ small.en: main
 .PHONY: medium.en
 medium.en: main
 	@echo "Downloading medium.en (1.5 GB just once)"
-	mkdir -p models
+	@mkdir -p models
 	@if [ ! -f models/ggml-medium.en.bin ]; then \
 		wget --quiet --show-progress -O models/ggml-medium.en.bin https://ggml.ggerganov.com/ggml-model-whisper-medium.en.bin ; \
 	fi
+	@echo ""
 	@echo "==============================================="
 	@echo "Running medium.en on all samples in ./samples ..."
@echo "===============================================" diff --git a/README.md b/README.md new file mode 100644 index 0000000..cb6e603 --- /dev/null +++ b/README.md @@ -0,0 +1,116 @@ +# whisper.cpp + +C/C++ port of [OpenAI's Whisper](https://github.com/openai/whisper) speech-to-text model + +- Plain C/C++ implementation without dependencies +- ARM_NEON and AVX intrinsics support +- F16 support + +## Usage + +For a quick demo, simply run `make base.en`: + +```bash +$ make base.en + +Downloading base.en (142 MB just once) +mkdir -p models +models/ggml-base.en.bin 100%[=================================>] 141.11M 7.50MB/s in 19s + +=============================================== +Running base.en on all samples in ./samples ... +=============================================== + +---------------------------------------------- +[+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen) +---------------------------------------------- + +whisper_model_load: loading model from 'models/ggml-base.en.bin' +whisper_model_load: n_vocab = 51864 +whisper_model_load: n_audio_ctx = 1500 +whisper_model_load: n_audio_state = 512 +whisper_model_load: n_audio_head = 8 +whisper_model_load: n_audio_layer = 6 +whisper_model_load: n_text_ctx = 448 +whisper_model_load: n_text_state = 512 +whisper_model_load: n_text_head = 8 +whisper_model_load: n_text_layer = 6 +whisper_model_load: n_mels = 80 +whisper_model_load: f16 = 1 +whisper_model_load: type = 2 +whisper_model_load: mem_required = 782.00 MB +whisper_model_load: adding 1607 extra tokens +whisper_model_load: ggml ctx size = 186.26 MB +whisper_model_load: memory size = 45.66 MB +whisper_model_load: model size = 140.54 MB +log_mel_spectrogram: n_sample = 176000, n_len = 1100 +log_mel_spectrogram: recording length: 11.000000 s + + And so my fellow Americans ask not what your country can do for you. Ask what you can do for your country. + +main: load time = 60.62 ms +main: mel time = 38.69 ms +main: sample time = 2.36 ms +main: encode time = 875.63 ms / 145.94 ms per layer +main: decode time = 103.17 ms +main: total time = 1081.13 ms + +``` + +The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`. + +If you want some extra audio samples to play with, simply run: + +``` +make samples +``` + +This will download a few more audio files from Wikipedia and convert them to 16-bit WAV format via `ffmpeg`. + +You can download and run the other `.en` models as follows: + +``` +make tiny.en +make base.en +make small.en +make medium.en +``` + +For detailed usage instructions, run: `./main -h` + +Note that `whisper.cpp` runs only with 16-bit WAV files, so make sure to convert your input before running the tool. +For example, you can use `ffmpeg` like this: + +```bash +ffmpeg -i input.mp3 -ar 16000 -ac 1 -c:a pcm_s16le output.wav +``` + +## Limitations + +- Only `.en` models are supported +- Very basic greedy sampling scheme - always pick up the top token +- No timestamps +- English only +- Inference only +- Runs on the CPU +- Only mono-channel 16-bit WAV is supported + +## Memory usage + +| Model | Mem | +| --- | --- | +| tiny.en | ~600 MB | +| base.en | ~800 MB | +| small.en | ~1.6 GB | +| medium.en | ~3.5 GB | + +## ggml format + +The original models are converted to a custom binary format. 
This allows to pack everything needed into a single file: + +- model parameters +- mel filters +- vocabulary +- weights + +For more details, see the conversion script [convert-pt-to-ggml.py](convert-pt-to-ggml.py)
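
As a rough illustration of the single-file `ggml` idea described at the end of the README above, here is a minimal C++ sketch of how such a model file could be read sequentially. The magic value, field names, and ordering are assumptions made purely for illustration; the conversion script `convert-pt-to-ggml.py` and the loader in `main` define the actual layout.

```cpp
// Hypothetical reader for a single-file model dump: a magic number, then
// hyperparameters, then mel filters, vocabulary and weights (layout assumed).
#include <cstdint>
#include <cstdio>
#include <fstream>

int main(int argc, char ** argv) {
    const char * path = argc > 1 ? argv[1] : "models/ggml-base.en.bin";
    std::ifstream fin(path, std::ios::binary);
    if (!fin) {
        std::fprintf(stderr, "failed to open '%s'\n", path);
        return 1;
    }

    // 1) magic number identifying the file format (assumed 4-byte tag)
    uint32_t magic = 0;
    fin.read((char *) &magic, sizeof(magic));

    // 2) model hyperparameters (a few int32 fields; names and order assumed)
    int32_t n_vocab = 0, n_audio_ctx = 0, n_mels = 0;
    fin.read((char *) &n_vocab,     sizeof(n_vocab));
    fin.read((char *) &n_audio_ctx, sizeof(n_audio_ctx));
    fin.read((char *) &n_mels,      sizeof(n_mels));
    // ... the remaining hyperparameters would be read here

    // 3) mel filter bank: dimensions followed by raw float data (assumed)
    // 4) vocabulary: token count, then (length, bytes) pairs (assumed)
    // 5) weights: per-tensor metadata followed by raw tensor data (assumed)

    std::printf("magic = 0x%08x, n_vocab = %d, n_audio_ctx = %d, n_mels = %d\n",
                (unsigned) magic, n_vocab, n_audio_ctx, n_mels);
    return 0;
}
```

Because everything lives in one file, loading is a single streaming pass with no external vocabulary or filter files to locate at runtime.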
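
Similarly, the "very basic greedy sampling scheme" listed under Limitations in the README simply means the decoder always takes the most probable token at each step. A minimal sketch of that idea, not the project's actual decoding loop:

```cpp
// Greedy (argmax) token selection: always pick the highest-scoring token.
// Illustrative only; the real decoder works on the model's output logits.
#include <algorithm>
#include <cstdio>
#include <vector>

static int sample_greedy(const std::vector<float> & logits) {
    // index of the maximum element = id of the chosen token
    return (int) std::distance(logits.begin(),
                               std::max_element(logits.begin(), logits.end()));
}

int main() {
    std::vector<float> logits = {0.1f, 2.3f, -0.7f, 1.9f};
    std::printf("picked token id = %d\n", sample_greedy(logits)); // prints 1
    return 0;
}
```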