diff --git a/README.md b/README.md
index faa166d0..079c73b7 100644
--- a/README.md
+++ b/README.md
@@ -53,18 +53,6 @@ On Apple Silicon, the inference runs fully on the GPU via Metal:
https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225
-Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)
-
-## Implementation details
-
-- The core tensor operations are implemented in C ([ggml.h](ggml/include/ggml.h) / [ggml.c](ggml/src/ggml.c))
-- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](include/whisper.h) / [whisper.cpp](src/whisper.cpp))
-- Sample usage is demonstrated in [main.cpp](examples/main)
-- Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream)
-- Various other examples are available in the [examples](examples) folder
-
-The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.
-
## Quick start
First clone the repository:
@@ -85,135 +73,26 @@ Then, download one of the Whisper [models](models/README.md) converted in [`ggml
sh ./models/download-ggml-model.sh base.en
```
-Now build the [main](examples/main) example and transcribe an audio file like this:
+Now build the [whisper-cli](examples/cli) example and transcribe an audio file like this:
```bash
-# build the main example
+# build the project
cmake -B build
cmake --build build --config Release
# transcribe an audio file
-./build/bin/main -f samples/jfk.wav
+./build/bin/whisper-cli -f samples/jfk.wav
```
---
-For a quick demo, simply run `make base.en`:
-
-```text
-$ make -j base.en
-
-cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o
-c++ -I. -I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o
-c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o ggml.o -o main -framework Accelerate
-./main -h
-
-usage: ./main [options] file0.wav file1.wav ...
-
-options:
- -h, --help [default] show this help message and exit
- -t N, --threads N [4 ] number of threads to use during computation
- -p N, --processors N [1 ] number of processors to use during computation
- -ot N, --offset-t N [0 ] time offset in milliseconds
- -on N, --offset-n N [0 ] segment index offset
- -d N, --duration N [0 ] duration of audio to process in milliseconds
- -mc N, --max-context N [-1 ] maximum number of text context tokens to store
- -ml N, --max-len N [0 ] maximum segment length in characters
- -sow, --split-on-word [false ] split on word rather than on token
- -bo N, --best-of N [5 ] number of best candidates to keep
- -bs N, --beam-size N [5 ] beam size for beam search
- -wt N, --word-thold N [0.01 ] word timestamp probability threshold
- -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
- -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
- -debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
- -tr, --translate [false ] translate from source language to english
- -di, --diarize [false ] stereo audio diarization
- -tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model)
- -nf, --no-fallback [false ] do not use temperature fallback while decoding
- -otxt, --output-txt [false ] output result in a text file
- -ovtt, --output-vtt [false ] output result in a vtt file
- -osrt, --output-srt [false ] output result in a srt file
- -olrc, --output-lrc [false ] output result in a lrc file
- -owts, --output-words [false ] output script for generating karaoke video
- -fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video
- -ocsv, --output-csv [false ] output result in a CSV file
- -oj, --output-json [false ] output result in a JSON file
- -ojf, --output-json-full [false ] include more information in the JSON file
- -of FNAME, --output-file FNAME [ ] output file path (without file extension)
- -ps, --print-special [false ] print special tokens
- -pc, --print-colors [false ] print colors
- -pp, --print-progress [false ] print progress
- -nt, --no-timestamps [false ] do not print timestamps
- -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
- -dl, --detect-language [false ] exit after automatically detecting language
- --prompt PROMPT [ ] initial prompt
- -m FNAME, --model FNAME [models/ggml-base.en.bin] model path
- -f FNAME, --file FNAME [ ] input WAV file path
- -oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
- -ls, --log-score [false ] log best decoder scores of tokens
- -ng, --no-gpu [false ] disable GPU
-
-
-sh ./models/download-ggml-model.sh base.en
-Downloading ggml model base.en ...
-ggml-base.en.bin 100%[========================>] 141.11M 6.34MB/s in 24s
-Done! Model 'base.en' saved in 'models/ggml-base.en.bin'
-You can now use it like this:
-
- $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
-
-
-===============================================
-Running base.en on all samples in ./samples ...
-===============================================
-
-----------------------------------------------
-[+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen)
-----------------------------------------------
-
-whisper_init_from_file: loading model from 'models/ggml-base.en.bin'
-whisper_model_load: loading model
-whisper_model_load: n_vocab = 51864
-whisper_model_load: n_audio_ctx = 1500
-whisper_model_load: n_audio_state = 512
-whisper_model_load: n_audio_head = 8
-whisper_model_load: n_audio_layer = 6
-whisper_model_load: n_text_ctx = 448
-whisper_model_load: n_text_state = 512
-whisper_model_load: n_text_head = 8
-whisper_model_load: n_text_layer = 6
-whisper_model_load: n_mels = 80
-whisper_model_load: f16 = 1
-whisper_model_load: type = 2
-whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder)
-whisper_model_load: kv self size = 5.25 MB
-whisper_model_load: kv cross size = 17.58 MB
-whisper_model_load: adding 1607 extra tokens
-whisper_model_load: model ctx = 140.60 MB
-whisper_model_load: model size = 140.54 MB
-
-system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
-
-main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
-
-
-[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country.
-
-
-whisper_print_timings: fallbacks = 0 p / 0 h
-whisper_print_timings: load time = 113.81 ms
-whisper_print_timings: mel time = 15.40 ms
-whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run)
-whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run)
-whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run)
-whisper_print_timings: total time = 476.31 ms
-```
+For a quick demo, simply run `make base.en`.
The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.
-For detailed usage instructions, run: `./main -h`
+For detailed usage instructions, run: `./build/bin/whisper-cli -h`
-Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
+Note that the [whisper-cli](examples/cli) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool.
For example, you can use `ffmpeg` like this:
```bash
@@ -271,7 +150,7 @@ cmake --build build --config Release
./build/bin/quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0
# run the examples as usual, specifying the quantized model file
-./build/bin/main -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav
+./build/bin/whisper-cli -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav
```
## Core ML support
@@ -313,7 +192,7 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in
- Run the examples as usual. For example:
```text
- $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
+ $ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav
...
@@ -397,7 +276,7 @@ This can result in significant speedup in encoder performance. Here are the inst
- Run the examples as usual. For example:
```text
- $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
+ $ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav
...
@@ -473,7 +352,7 @@ cmake --build build -j --config Release
Run the inference examples as usual, for example:
```
-./build/bin/main -f samples/jfk.wav -m models/ggml-base.en.bin -t 8
+./build/bin/whisper-cli -f samples/jfk.wav -m models/ggml-base.en.bin -t 8
```
*Notes:*
@@ -481,38 +360,6 @@ Run the inference examples as usual, for example:
- If you have trouble with Ascend NPU device, please create a issue with **[CANN]** prefix/tag.
- If you run successfully with your Ascend NPU device, please help update the table `Verified devices`.
-## Docker
-
-### Prerequisites
-
-- Docker must be installed and running on your system.
-- Create a folder to store big models & intermediate files (ex. /whisper/models)
-
-### Images
-
-We have two Docker images available for this project:
-
-1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
-2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
-
-### Usage
-
-```shell
-# download model and persist it in a local folder
-docker run -it --rm \
- -v path/to/models:/models \
- whisper.cpp:main "./models/download-ggml-model.sh base /models"
-# transcribe an audio file
-docker run -it --rm \
- -v path/to/models:/models \
- -v path/to/audios:/audios \
- whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav"
-# transcribe an audio file in samples folder
-docker run -it --rm \
- -v path/to/models:/models \
- whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav"
-```
-
## Installing with Conan
You can install pre-built binaries for whisper.cpp or build it from source using [Conan](https://conan.io/). Use the following command:
@@ -527,89 +374,6 @@ For detailed instructions on how to use Conan, please refer to the [Conan docume
- Inference only
-## Another example
-
-Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg)
-in about half a minute on a MacBook M1 Pro, using `medium.en` model:
-
-
- Expand to see the result
-
-```text
-$ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8
-
-whisper_init_from_file: loading model from 'models/ggml-medium.en.bin'
-whisper_model_load: loading model
-whisper_model_load: n_vocab = 51864
-whisper_model_load: n_audio_ctx = 1500
-whisper_model_load: n_audio_state = 1024
-whisper_model_load: n_audio_head = 16
-whisper_model_load: n_audio_layer = 24
-whisper_model_load: n_text_ctx = 448
-whisper_model_load: n_text_state = 1024
-whisper_model_load: n_text_head = 16
-whisper_model_load: n_text_layer = 24
-whisper_model_load: n_mels = 80
-whisper_model_load: f16 = 1
-whisper_model_load: type = 4
-whisper_model_load: mem required = 1720.00 MB (+ 43.00 MB per decoder)
-whisper_model_load: kv self size = 42.00 MB
-whisper_model_load: kv cross size = 140.62 MB
-whisper_model_load: adding 1607 extra tokens
-whisper_model_load: model ctx = 1462.35 MB
-whisper_model_load: model size = 1462.12 MB
-
-system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 |
-
-main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ...
-
-
-[00:00:00.000 --> 00:00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country.
-[00:00:08.000 --> 00:00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia.
-[00:00:17.000 --> 00:00:23.000] A short time later, debris was seen falling from the skies above Texas.
-[00:00:23.000 --> 00:00:29.000] The Columbia's lost. There are no survivors.
-[00:00:29.000 --> 00:00:32.000] On board was a crew of seven.
-[00:00:32.000 --> 00:00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark,
-[00:00:39.000 --> 00:00:48.000] Captain David Brown, Commander William McCool, Dr. Kultna Shavla, and Ilan Ramon,
-[00:00:48.000 --> 00:00:52.000] a colonel in the Israeli Air Force.
-[00:00:52.000 --> 00:00:58.000] These men and women assumed great risk in the service to all humanity.
-[00:00:58.000 --> 00:01:03.000] In an age when space flight has come to seem almost routine,
-[00:01:03.000 --> 00:01:07.000] it is easy to overlook the dangers of travel by rocket
-[00:01:07.000 --> 00:01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth.
-[00:01:12.000 --> 00:01:18.000] These astronauts knew the dangers, and they faced them willingly,
-[00:01:18.000 --> 00:01:23.000] knowing they had a high and noble purpose in life.
-[00:01:23.000 --> 00:01:31.000] Because of their courage and daring and idealism, we will miss them all the more.
-[00:01:31.000 --> 00:01:36.000] All Americans today are thinking as well of the families of these men and women
-[00:01:36.000 --> 00:01:40.000] who have been given this sudden shock and grief.
-[00:01:40.000 --> 00:01:45.000] You're not alone. Our entire nation grieves with you,
-[00:01:45.000 --> 00:01:52.000] and those you love will always have the respect and gratitude of this country.
-[00:01:52.000 --> 00:01:56.000] The cause in which they died will continue.
-[00:01:56.000 --> 00:02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery
-[00:02:04.000 --> 00:02:11.000] and the longing to understand. Our journey into space will go on.
-[00:02:11.000 --> 00:02:16.000] In the skies today, we saw destruction and tragedy.
-[00:02:16.000 --> 00:02:22.000] Yet farther than we can see, there is comfort and hope.
-[00:02:22.000 --> 00:02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens
-[00:02:29.000 --> 00:02:35.000] who created all these. He who brings out the starry hosts one by one
-[00:02:35.000 --> 00:02:39.000] and calls them each by name."
-[00:02:39.000 --> 00:02:46.000] Because of His great power and mighty strength, not one of them is missing.
-[00:02:46.000 --> 00:02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today.
-[00:02:55.000 --> 00:03:01.000] The crew of the shuttle Columbia did not return safely to earth,
-[00:03:01.000 --> 00:03:05.000] yet we can pray that all are safely home.
-[00:03:05.000 --> 00:03:13.000] May God bless the grieving families, and may God continue to bless America.
-[00:03:13.000 --> 00:03:19.000] [Silence]
-
-
-whisper_print_timings: fallbacks = 1 p / 0 h
-whisper_print_timings: load time = 569.03 ms
-whisper_print_timings: mel time = 146.85 ms
-whisper_print_timings: sample time = 238.66 ms / 553 runs ( 0.43 ms per run)
-whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run)
-whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run)
-whisper_print_timings: total time = 32733.52 ms
-```
-
-
-
## Real-time audio input example
This is a naive example of performing real-time inference on audio from your microphone.
@@ -630,7 +394,7 @@ Adding the `--print-colors` argument will print the transcribed text using an ex
to highlight words with high or low confidence:
```bash
-./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
+./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors
```
@@ -640,7 +404,7 @@ to highlight words with high or low confidence:
For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`:
```text
-$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
+$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16
whisper_model_load: loading model from './models/ggml-base.en.bin'
...
@@ -664,7 +428,7 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr
The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`:
```text
-$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
+$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1
whisper_model_load: loading model from './models/ggml-base.en.bin'
...
@@ -711,7 +475,7 @@ Sample usage:
./models/download-ggml-model.sh small.en-tdrz
# run as usual, adding the "-tdrz" command-line argument
-./main -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz
+./build/bin/whisper-cli -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz
...
main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ...
...
@@ -728,14 +492,14 @@ main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 pr
## Karaoke-style movie generation (experimental)
-The [main](examples/main) example provides support for output of karaoke-style movies, where the
+The [whisper-cli](examples/cli) example provides support for output of karaoke-style movies, where the
currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script.
This requires to have `ffmpeg` installed.
Here are a few _"typical"_ examples:
```bash
-./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
+./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts
source ./samples/jfk.wav.wts
ffplay ./samples/jfk.wav.mp4
```
@@ -745,7 +509,7 @@ https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b
---
```bash
-./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
+./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts
source ./samples/mm0.wav.wts
ffplay ./samples/mm0.wav.mp4
```
@@ -755,7 +519,7 @@ https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-9
---
```bash
-./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
+./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts
source ./samples/gb0.wav.wts
ffplay ./samples/gb0.wav.mp4
```
@@ -780,7 +544,7 @@ https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8
## Benchmarks
In order to have an objective comparison of the performance of the inference across different system configurations,
-use the [bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it
+use the [whisper-bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it
took to execute it. The results are summarized in the following Github issue:
[Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89)
@@ -843,13 +607,12 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch
| Example | Web | Description |
| --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
-| [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
-| [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
-| [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
-| [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
-| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
-| [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot |
-| [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
+| [whisper-cli](examples/cli) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper |
+| [whisper-bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine |
+| [whisper-stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture |
+| [whisper-command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic |
+| [whisper-server](examples/server) | | HTTP transcription server with OAI-like API |
+| [whisper-talk-llama](examples/talk-llama) | | Talk with a LLaMA bot |
| [whisper.objc](examples/whisper.objc) | | iOS mobile application using whisper.cpp |
| [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp |
| [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp |
@@ -857,7 +620,7 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch
| [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture |
| [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) |
| [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) |
-| [server](examples/server) | | HTTP transcription server with OAI-like API |
+| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess |
## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions)
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index addfa1d4..3e03c95e 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -97,52 +97,33 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
if (EMSCRIPTEN)
add_subdirectory(whisper.wasm)
- set_target_properties(libmain PROPERTIES FOLDER "libs")
add_subdirectory(stream.wasm)
- set_target_properties(libstream PROPERTIES FOLDER "libs")
add_subdirectory(command.wasm)
- set_target_properties(libcommand PROPERTIES FOLDER "libs")
- #add_subdirectory(talk.wasm)
- #set_target_properties(libtalk PROPERTIES FOLDER "libs")
add_subdirectory(bench.wasm)
- set_target_properties(libbench PROPERTIES FOLDER "libs")
elseif(CMAKE_JS_VERSION)
add_subdirectory(addon.node)
- set_target_properties(addon.node PROPERTIES FOLDER "examples")
else()
- add_subdirectory(main)
- set_target_properties(main PROPERTIES FOLDER "examples")
+ add_subdirectory(cli)
if (WHISPER_SDL2)
add_subdirectory(stream)
- set_target_properties(stream PROPERTIES FOLDER "examples")
endif (WHISPER_SDL2)
add_subdirectory(server)
- set_target_properties(server PROPERTIES FOLDER "examples")
if (WHISPER_SDL2)
add_subdirectory(command)
- set_target_properties(command PROPERTIES FOLDER "examples")
endif (WHISPER_SDL2)
add_subdirectory(bench)
- set_target_properties(bench PROPERTIES FOLDER "examples")
add_subdirectory(quantize)
- set_target_properties(quantize PROPERTIES FOLDER "examples")
if (WHISPER_SDL2)
- # TODO: disabled until update
- # https://github.com/ggerganov/whisper.cpp/issues/1818
- #add_subdirectory(talk)
- #set_target_properties(talk PROPERTIES FOLDER "examples")
add_subdirectory(talk-llama)
- set_target_properties(talk-llama PROPERTIES FOLDER "examples")
add_subdirectory(lsp)
- set_target_properties(lsp PROPERTIES FOLDER "examples")
if (GGML_SYCL)
add_subdirectory(sycl)
- set_target_properties(ls-sycl-device PROPERTIES FOLDER "examples")
endif()
endif (WHISPER_SDL2)
endif()
if (WHISPER_SDL2)
add_subdirectory(wchess)
- set_target_properties(wchess PROPERTIES FOLDER "examples")
endif (WHISPER_SDL2)
+
+add_subdirectory(deprecation-warning)
diff --git a/examples/bench/CMakeLists.txt b/examples/bench/CMakeLists.txt
index f8a72ffd..f255f2dc 100644
--- a/examples/bench/CMakeLists.txt
+++ b/examples/bench/CMakeLists.txt
@@ -1,6 +1,8 @@
-set(TARGET bench)
+set(TARGET whisper-bench)
add_executable(${TARGET} bench.cpp)
include(DefaultTargetOptions)
target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
+
+install(TARGETS ${TARGET} RUNTIME)
diff --git a/examples/bench/README.md b/examples/bench/README.md
index 5b42cb4d..cf58665a 100644
--- a/examples/bench/README.md
+++ b/examples/bench/README.md
@@ -1,4 +1,4 @@
-# bench
+# whisper.cpp/examples/bench
A very basic tool for benchmarking the inference performance on your device. The tool simply runs the Encoder part of
the transformer on some random audio data and records the execution time. This way we can have an objective comparison
@@ -7,11 +7,8 @@ of the performance of the model for various setups.
Benchmark results are tracked in the following Github issue: https://github.com/ggerganov/whisper.cpp/issues/89
```bash
-# build the bench tool
-$ make bench
-
-# run it on the small.en model using 4 threads
-$ ./bench -m ./models/ggml-small.en.bin -t 4
+# run the bench tool on the small.en model using 4 threads
+$ ./build/bin/whisper-bench -m ./models/ggml-small.en.bin -t 4
whisper_model_load: loading model from './models/ggml-small.en.bin'
whisper_model_load: n_vocab = 51864
diff --git a/examples/main/CMakeLists.txt b/examples/cli/CMakeLists.txt
similarity index 58%
rename from examples/main/CMakeLists.txt
rename to examples/cli/CMakeLists.txt
index 1e66e4b5..3a73776c 100644
--- a/examples/main/CMakeLists.txt
+++ b/examples/cli/CMakeLists.txt
@@ -1,6 +1,8 @@
-set(TARGET main)
-add_executable(${TARGET} main.cpp)
+set(TARGET whisper-cli)
+add_executable(${TARGET} cli.cpp)
include(DefaultTargetOptions)
target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+
+install(TARGETS ${TARGET} RUNTIME)
diff --git a/examples/main/README.md b/examples/cli/README.md
similarity index 75%
rename from examples/main/README.md
rename to examples/cli/README.md
index 2d868810..1847134e 100644
--- a/examples/main/README.md
+++ b/examples/cli/README.md
@@ -1,12 +1,12 @@
-# main
+# whisper.cpp/examples/cli
This is the main example demonstrating most of the functionality of the Whisper model.
It can be used as a reference for using the `whisper.cpp` library in other projects.
```
-./main -h
+./build/bin/whisper-cli -h
-usage: ./main [options] file0.wav file1.wav ...
+usage: ./build/bin/whisper-cli [options] file0.wav file1.wav ...
options:
-h, --help [default] show this help message and exit
@@ -20,9 +20,12 @@ options:
-sow, --split-on-word [false ] split on word rather than on token
-bo N, --best-of N [5 ] number of best candidates to keep
-bs N, --beam-size N [5 ] beam size for beam search
+ -ac N, --audio-ctx N [0 ] audio context size (0 - all)
-wt N, --word-thold N [0.01 ] word timestamp probability threshold
-et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail
-lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail
+ -tp, --temperature N [0.00 ] The sampling temperature, between 0 and 1
+ -tpi, --temperature-inc N [0.20 ] The increment of temperature, between 0 and 1
-debug, --debug-mode [false ] enable debug mode (eg. dump log_mel)
-tr, --translate [false ] translate from source language to english
-di, --diarize [false ] stereo audio diarization
@@ -38,16 +41,23 @@ options:
-oj, --output-json [false ] output result in a JSON file
-ojf, --output-json-full [false ] include more information in the JSON file
-of FNAME, --output-file FNAME [ ] output file path (without file extension)
+ -np, --no-prints [false ] do not print anything other than the results
-ps, --print-special [false ] print special tokens
-pc, --print-colors [false ] print colors
-pp, --print-progress [false ] print progress
-nt, --no-timestamps [false ] do not print timestamps
-l LANG, --language LANG [en ] spoken language ('auto' for auto-detect)
-dl, --detect-language [false ] exit after automatically detecting language
- --prompt PROMPT [ ] initial prompt
+ --prompt PROMPT [ ] initial prompt (max n_text_ctx/2 tokens)
-m FNAME, --model FNAME [models/ggml-base.en.bin] model path
-f FNAME, --file FNAME [ ] input WAV file path
-oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference
+ -dtw MODEL --dtw MODEL [ ] compute token-level timestamps
-ls, --log-score [false ] log best decoder scores of tokens
-ng, --no-gpu [false ] disable GPU
+ -fa, --flash-attn [false ] flash attention
+ --suppress-regex REGEX [ ] regular expression matching tokens to suppress
+ --grammar GRAMMAR [ ] GBNF grammar to guide decoding
+ --grammar-rule RULE [ ] top-level GBNF grammar rule name
+ --grammar-penalty N [100.0 ] scales down logits of nongrammar tokens
```
diff --git a/examples/main/main.cpp b/examples/cli/cli.cpp
similarity index 100%
rename from examples/main/main.cpp
rename to examples/cli/cli.cpp
diff --git a/examples/command/CMakeLists.txt b/examples/command/CMakeLists.txt
index 40f278c1..c929a6f5 100644
--- a/examples/command/CMakeLists.txt
+++ b/examples/command/CMakeLists.txt
@@ -1,9 +1,10 @@
if (WHISPER_SDL2)
- # command
- set(TARGET command)
+ set(TARGET whisper-command)
add_executable(${TARGET} command.cpp)
include(DefaultTargetOptions)
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
+
+ install(TARGETS ${TARGET} RUNTIME)
endif ()
diff --git a/examples/command/README.md b/examples/command/README.md
index 46b14e93..7eb2bb60 100644
--- a/examples/command/README.md
+++ b/examples/command/README.md
@@ -1,14 +1,14 @@
-# command
+# whisper.cpp/examples/command
This is a basic Voice Assistant example that accepts voice commands from the microphone.
More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/issues/171).
```bash
# Run with default arguments and small model
-./command -m ./models/ggml-small.en.bin -t 8
+./whisper-command -m ./models/ggml-small.en.bin -t 8
# On Raspberry Pi, use tiny or base models + "-ac 768" for better performance
-./command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0
+./whisper-command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0
```
https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4
@@ -23,10 +23,10 @@ Initial tests show that this approach might be extremely efficient in terms of p
```bash
# Run in guided mode, the list of allowed commands is in commands.txt
-./command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt
+./whisper-command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt
# On Raspberry Pi, in guided mode you can use "-ac 128" for extra performance
-./command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0
+./whisper-command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0
```
https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9b8b-aeeb76bee969.mp4
@@ -34,7 +34,7 @@ https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9
## Building
-The `command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
+The `whisper-command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
```bash
# Install SDL2
@@ -47,5 +47,6 @@ sudo dnf install SDL2 SDL2-devel
# Install SDL2 on Mac OS
brew install sdl2
-make command
+cmake -B build -DWHISPER_SDL2=ON
+cmake --build build --config Release
```
diff --git a/examples/deprecation-warning/CMakeLists.txt b/examples/deprecation-warning/CMakeLists.txt
new file mode 100644
index 00000000..f845e6cc
--- /dev/null
+++ b/examples/deprecation-warning/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_executable(main ./deprecation-warning.cpp)
+add_executable(bench ./deprecation-warning.cpp)
+add_executable(stream ./deprecation-warning.cpp)
+add_executable(command ./deprecation-warning.cpp)
diff --git a/examples/deprecation-warning/README.md b/examples/deprecation-warning/README.md
new file mode 100644
index 00000000..e07e134b
--- /dev/null
+++ b/examples/deprecation-warning/README.md
@@ -0,0 +1,17 @@
+# Migration notice for binary filenames
+
+> [!IMPORTANT]
+[2024 Dec 20] Binaries have been renamed w/ a `whisper-` prefix. `main` is now `whisper-cli`, `server` is `whisper-server`, etc (https://github.com/ggerganov/whisper.cpp/pull/2648)
+
+This migration was important, but it is a breaking change that may not always be immediately obvious to users.
+
+Please update all scripts and workflows to use the new binary names.
+
+| Old Filename | New Filename |
+| ---- | ---- |
+| main | whisper-cli |
+| bench | whisper-bench |
+| stream | whisper-stream |
+| command | whisper-command |
+| server | whisper-server |
+| talk-llama | whisper-talk-llama |
diff --git a/examples/deprecation-warning/deprecation-warning.cpp b/examples/deprecation-warning/deprecation-warning.cpp
new file mode 100644
index 00000000..7247f0e0
--- /dev/null
+++ b/examples/deprecation-warning/deprecation-warning.cpp
@@ -0,0 +1,34 @@
+// Warns users that this filename was deprecated, and provides a link for more information.
+
+#include <cstdio>
+#include <string>
+
+// Main
+int main(int argc, char** argv) {
+ std::string filename = "main";
+ if (argc >= 1) {
+ filename = argv[0];
+ }
+
+ // Get only the program name from the full path
+ size_t pos = filename.find_last_of("/\\");
+ if (pos != std::string::npos) {
+ filename = filename.substr(pos+1);
+ }
+
+ // Append "whisper-" to the beginning of filename to get the replacement filename
+ std::string replacement_filename = "whisper-" + filename;
+
+ // The exception is if the filename is "main", then our replacement filename is "whisper-cli"
+ if (filename == "main") {
+ replacement_filename = "whisper-cli";
+ }
+
+ fprintf(stdout, "\n");
+ fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str());
+ fprintf(stdout, " Please use '%s' instead.\n", replacement_filename.c_str());
+ fprintf(stdout, " See https://github.com/ggerganov/whisper.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n");
+ fprintf(stdout, "\n");
+
+ return EXIT_FAILURE;
+}
diff --git a/examples/generate-karaoke.sh b/examples/generate-karaoke.sh
index 7062c9a0..6c32970c 100755
--- a/examples/generate-karaoke.sh
+++ b/examples/generate-karaoke.sh
@@ -11,7 +11,7 @@
# Press Ctrl+C to stop recording
#
-executable="./main"
+executable="./build/bin/whisper-cli"
model="base.en"
model_path="models/ggml-$model.bin"
@@ -46,7 +46,7 @@ ffmpeg -y -i ./rec.wav -ar 16000 -ac 1 -c:a pcm_s16le ./rec16.wav > /dev/null 2>
# run Whisper
echo "Processing ..."
-./main -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1
+${executable} -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1
# generate Karaoke video
echo "Generating video ..."
diff --git a/examples/livestream.sh b/examples/livestream.sh
index 7c044199..14f6cf80 100755
--- a/examples/livestream.sh
+++ b/examples/livestream.sh
@@ -14,7 +14,7 @@ model="base.en"
check_requirements()
{
- if ! command -v ./main &>/dev/null; then
+ if ! command -v ./build/bin/whisper-cli &>/dev/null; then
echo "whisper.cpp main executable is required (make)"
exit 1
fi
@@ -100,7 +100,7 @@ while [ $running -eq 1 ]; do
err=$(cat /tmp/whisper-live.err | wc -l)
done
- ./main -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
+ ./build/bin/whisper-cli -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1
while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do
sleep 1
@@ -109,4 +109,4 @@ while [ $running -eq 1 ]; do
done
killall -v ffmpeg
-killall -v main
+killall -v whisper-cli
diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
index 96dd97f7..4667c307 100644
--- a/examples/server/CMakeLists.txt
+++ b/examples/server/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(TARGET server)
+set(TARGET whisper-server)
add_executable(${TARGET} server.cpp httplib.h)
include(DefaultTargetOptions)
@@ -8,3 +8,5 @@ target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_L
if (WIN32)
target_link_libraries(${TARGET} PRIVATE ws2_32)
endif()
+
+install(TARGETS ${TARGET} RUNTIME)
diff --git a/examples/server/README.md b/examples/server/README.md
index 596fd769..4f2e4036 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -1,4 +1,4 @@
-# whisper.cpp http server
+# whisper.cpp/examples/server
Simple http server. WAV Files are passed to the inference model via http requests.
@@ -7,9 +7,9 @@ https://github.com/ggerganov/whisper.cpp/assets/1991296/e983ee53-8741-4eb5-9048-
## Usage
```
-./server -h
+./build/bin/whisper-server -h
-usage: ./bin/server [options]
+usage: ./build/bin/whisper-server [options]
options:
-h, --help [default] show this help message and exit
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index e7d7c8ae..799eabca 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -677,7 +677,17 @@ int main(int argc, char ** argv) {
if (sparams.ffmpeg_converter) {
// if file is not wav, convert to wav
// write to temporary file
- const std::string temp_filename_base = std::tmpnam(nullptr);
+ //const std::string temp_filename_base = std::tmpnam(nullptr); // note: this is unsafe
+ const std::string temp_filename_base = []() {
+ char temp_filename_template[] = "/tmp/tempfileXXXXXX";
+ int fd = mkstemp(temp_filename_template);
+ if (fd == -1) {
+ perror("mkstemp");
+ return std::string();
+ }
+ close(fd);
+ return std::string(temp_filename_template);
+ }();
const std::string temp_filename = temp_filename_base + ".wav";
std::ofstream temp_file{temp_filename, std::ios::binary};
temp_file << audio_file.content;
@@ -711,7 +721,6 @@ int main(int argc, char ** argv) {
}
}
-
printf("Successfully loaded %s\n", filename.c_str());
// print system information
diff --git a/examples/stream/CMakeLists.txt b/examples/stream/CMakeLists.txt
index 312d52c6..a6104839 100644
--- a/examples/stream/CMakeLists.txt
+++ b/examples/stream/CMakeLists.txt
@@ -1,9 +1,10 @@
if (WHISPER_SDL2)
- # stream
- set(TARGET stream)
+ set(TARGET whisper-stream)
add_executable(${TARGET} stream.cpp)
include(DefaultTargetOptions)
target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT})
+
+ install(TARGETS ${TARGET} RUNTIME)
endif ()
diff --git a/examples/stream/README.md b/examples/stream/README.md
index a8bdf71d..f07cfb89 100644
--- a/examples/stream/README.md
+++ b/examples/stream/README.md
@@ -1,11 +1,11 @@
-# stream
+# whisper.cpp/examples/stream
This is a naive example of performing real-time inference on audio from your microphone.
-The `stream` tool samples the audio every half a second and runs the transcription continously.
+The `whisper-stream` tool samples the audio every half a second and runs the transcription continuously.
More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10).
```bash
-./build/bin/stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
+./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000
```
https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4
@@ -15,7 +15,7 @@ https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a
Setting the `--step` argument to `0` enables the sliding window mode:
```bash
- ./build/bin/stream -m ./models/ggml-base.en.bin -t 6 --step 0 --length 30000 -vth 0.6
+ ./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 6 --step 0 --length 30000 -vth 0.6
```
In this mode, the tool will transcribe only after some speech activity is detected. A very
@@ -27,7 +27,7 @@ a transcription block that is suitable for parsing.
## Building
-The `stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
+The `whisper-stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
```bash
# Install SDL2
@@ -43,7 +43,7 @@ brew install sdl2
cmake -B build -DWHISPER_SDL2=ON
cmake --build build --config Release
-./build/bin/stream
+./build/bin/whisper-stream
```
## Web version
diff --git a/examples/talk-llama/CMakeLists.txt b/examples/talk-llama/CMakeLists.txt
index 56b4d0d7..ce51660c 100644
--- a/examples/talk-llama/CMakeLists.txt
+++ b/examples/talk-llama/CMakeLists.txt
@@ -1,6 +1,5 @@
if (WHISPER_SDL2)
- # talk-llama
- set(TARGET talk-llama)
+ set(TARGET whisper-talk-llama)
add_executable(${TARGET} talk-llama.cpp
llama.cpp
llama-vocab.cpp
diff --git a/examples/talk-llama/README.md b/examples/talk-llama/README.md
index f8f55440..7f9fa6df 100644
--- a/examples/talk-llama/README.md
+++ b/examples/talk-llama/README.md
@@ -1,4 +1,4 @@
-# talk-llama
+# whisper.cpp/examples/talk-llama
Talk with an LLaMA AI in your terminal
@@ -12,7 +12,7 @@ https://github.com/ggerganov/whisper.cpp/assets/1991296/d97a3788-bf2a-4756-9a43-
## Building
-The `talk-llama` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
+The `whisper-talk-llama` tool depends on SDL2 library to capture audio from the microphone. You can build it like this:
```bash
# Install SDL2
@@ -25,11 +25,12 @@ sudo dnf install SDL2 SDL2-devel
# Install SDL2 on Mac OS
brew install sdl2
-# Build the "talk-llama" executable
-make talk-llama
+# Build the "whisper-talk-llama" executable
+cmake -B build -S . -DWHISPER_SDL2=ON
+cmake --build build --config Release
# Run it
-./talk-llama -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
+./build/bin/whisper-talk-llama -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
```
- The `-mw` argument specifies the Whisper model that you would like to use. Recommended `base` or `small` for real-time experience
@@ -37,16 +38,16 @@ make talk-llama
## Session
-The `talk-llama` tool supports session management to enable more coherent and continuous conversations. By maintaining context from previous interactions, it can better understand and respond to user requests in a more natural way.
+The `whisper-talk-llama` tool supports session management to enable more coherent and continuous conversations. By maintaining context from previous interactions, it can better understand and respond to user requests in a more natural way.
-To enable session support, use the `--session FILE` command line option when running the program. The `talk-llama` model state will be saved to the specified file after each interaction. If the file does not exist, it will be created. If the file exists, the model state will be loaded from it, allowing you to resume a previous session.
+To enable session support, use the `--session FILE` command line option when running the program. The `whisper-talk-llama` model state will be saved to the specified file after each interaction. If the file does not exist, it will be created. If the file exists, the model state will be loaded from it, allowing you to resume a previous session.
This feature is especially helpful for maintaining context in long conversations or when interacting with the AI assistant across multiple sessions. It ensures that the assistant remembers the previous interactions and can provide more relevant and contextual responses.
Example usage:
```bash
-./talk-llama --session ./my-session-file -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
+./build/bin/whisper-talk-llama --session ./my-session-file -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8
```
## TTS
diff --git a/examples/talk.wasm/CMakeLists.txt b/examples/talk.wasm/CMakeLists.txt
deleted file mode 100644
index 8f00eb48..00000000
--- a/examples/talk.wasm/CMakeLists.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# libtalk
-#
-
-set(TARGET libtalk)
-
-add_executable(${TARGET}
- emscripten.cpp
- gpt-2.cpp
- )
-
-include(DefaultTargetOptions)
-
-target_link_libraries(${TARGET} PRIVATE
- whisper
- common
- )
-
-unset(EXTRA_FLAGS)
-
-if (WHISPER_WASM_SINGLE_FILE)
- set(EXTRA_FLAGS "-s SINGLE_FILE=1")
- message(STATUS "Embedding WASM inside talk.js")
-
- add_custom_command(
- TARGET ${TARGET} POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy
- ${CMAKE_BINARY_DIR}/bin/libtalk.js
- ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/talk.wasm/talk.js
- )
-endif()
-
-set_target_properties(${TARGET} PROPERTIES LINK_FLAGS " \
- --bind \
- -s USE_PTHREADS=1 \
- -s PTHREAD_POOL_SIZE=8 \
- -s INITIAL_MEMORY=1800MB \
- -s TOTAL_MEMORY=1800MB \
- -s FORCE_FILESYSTEM=1 \
- -s EXPORTED_RUNTIME_METHODS=\"['print', 'printErr', 'ccall', 'cwrap']\" \
- ${EXTRA_FLAGS} \
- ")
-
-#
-# talk.wasm
-#
-
-set(TARGET talk.wasm)
-
-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/index-tmpl.html ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/index.html @ONLY)
-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/../helpers.js ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/helpers.js @ONLY)
diff --git a/examples/talk.wasm/README.md b/examples/talk.wasm/README.md
deleted file mode 100644
index e656fee7..00000000
--- a/examples/talk.wasm/README.md
+++ /dev/null
@@ -1,74 +0,0 @@
-# talk.wasm
-
-Talk with an Artificial Intelligence in your browser:
-
-[https://user-images.githubusercontent.com/1991296/203411580-fedb4839-05e4-4474-8364-aaf1e9a9b615.mp4](https://user-images.githubusercontent.com/1991296/203845553-f7b44e13-9a15-4fc8-b518-ae8f4c6770fe.mp4)
-
-Online demo: https://whisper.ggerganov.com/talk/
-
-Terminal version: [examples/talk](/examples/talk)
-
-## How it works?
-
-This demo leverages 2 modern neural network models to create a high-quality voice chat directly in your browser:
-
-- [OpenAI's Whisper](https://github.com/openai/whisper) speech recognition model is used to process your voice and understand what you are saying
-- Upon receiving some voice input, the AI generates a text response using [OpenAI's GPT-2](https://github.com/openai/gpt-2) language model
-- The AI then vocalizes the response using the browser's [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API)
-
-The web page does the processing locally on your machine. The processing of these heavy neural network models in the
-browser is possible by implementing them efficiently in C/C++ and using the browser's WebAssembly SIMD capabilities for
-extra performance:
-
-- The Whisper C++ implementation is here: [whisper.h](/whisper.h) / [whisper.cpp](/whisper.cpp)
-- The GPT-2 C++ implementation is here: [gpt-2.h](gpt-2.h) / [gpt-2.cpp](gpt-2.cpp)
-- Both models use a custom tensor library implemented in C: [ggml.h](/ggml.h) / [ggml.c](/ggml.c)
-- The HTML/JS layer is here: [index-tmpl.html](index-tmpl.html)
-- The Emscripten bridge between C/C++ and JS is here: [emscripten.cpp](emscripten.cpp)
-
-In order to run the models, the web page first needs to download the model data which is about ~350 MB. The model data
-is then cached in your browser's cache and can be reused in future visits without downloading it again.
-
-## Requirements
-
-In order to run this demo efficiently, you need to have the following:
-
-- Latest Chrome or Firefox browser (Safari is not supported)
-- Run this on a desktop or laptop with modern CPU (a mobile phone will likely not be good enough)
-- Speak phrases that are no longer than 10 seconds - this is the audio context of the AI
-- The web-page uses about 1.8GB of RAM
-
-Notice that this demo is using the smallest GPT-2 model, so the generated text responses are not always very good.
-Also, the prompting strategy can likely be improved to achieve better results.
-
-The demo is quite computationally heavy, so you need a fast CPU. It's not usual to run these transformer models in a
-browser. Typically, they run on powerful GPUs.
-
-Currently, mobile browsers do not support the Fixed-width SIMD WebAssembly capability, so you cannot run this demo
-on a phone or a tablet. Hopefully, in the near future this will become supported.
-
-## Todo
-
-- Better UI (contributions are welcome)
-- Better GPT-2 prompting
-
-## Build instructions
-
-```bash
-# build using Emscripten (v3.1.2)
-git clone https://github.com/ggerganov/whisper.cpp
-cd whisper.cpp
-mkdir build-em && cd build-em
-emcmake cmake ..
-make -j
-
-# copy the produced page to your HTTP path
-cp bin/talk.wasm/* /path/to/html/
-cp bin/libtalk.worker.js /path/to/html/
-```
-
-## Feedback
-
-If you have any comments or ideas for improvement, please drop a comment in the following discussion:
-
-https://github.com/ggerganov/whisper.cpp/discussions/167
diff --git a/examples/talk.wasm/emscripten.cpp b/examples/talk.wasm/emscripten.cpp
deleted file mode 100644
index 53cb951e..00000000
--- a/examples/talk.wasm/emscripten.cpp
+++ /dev/null
@@ -1,368 +0,0 @@
-#include "ggml.h"
-#include "gpt-2.h"
-#include "whisper.h"
-
-#include <emscripten.h>
-#include <emscripten/bind.h>
-
-#include <atomic>
-#include <cmath>
-#include <mutex>
-#include <string>
-#include <thread>
-#include <vector>
-#include <regex>
-
-constexpr int N_THREAD = 8;
-
-struct gpt2_context * g_gpt2;
-std::vector<struct whisper_context *> g_contexts(4, nullptr);
-
-std::mutex g_mutex;
-std::thread g_worker;
-std::atomic<bool> g_running(false);
-
-bool g_force_speak = false;
-std::string g_text_to_speak = "";
-std::string g_status = "";
-std::string g_status_forced = "";
-
-std::vector<float> g_pcmf32;
-
-void talk_set_status(const std::string & status) {
- std::lock_guard lock(g_mutex);
- g_status = status;
-}
-
-void talk_main(size_t index) {
- talk_set_status("loading data ...");
-
- struct whisper_full_params wparams = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY);
-
- wparams.n_threads = std::min(N_THREAD, (int) std::thread::hardware_concurrency());
- wparams.offset_ms = 0;
- wparams.translate = false;
- wparams.no_context = true;
- wparams.single_segment = true;
- wparams.print_realtime = false;
- wparams.print_progress = false;
- wparams.print_timestamps = true;
- wparams.print_special = false;
-
- wparams.max_tokens = 32;
- wparams.audio_ctx = 768; // partial encoder context for better performance
-
- wparams.language = "en";
-
- g_gpt2 = gpt2_init("gpt-2.bin");
-
- printf("talk: using %d threads\n", wparams.n_threads);
-
- std::vector pcmf32;
-
- // whisper context
- auto & ctx = g_contexts[index];
-
- const int64_t step_samples = 2*WHISPER_SAMPLE_RATE;
- const int64_t window_samples = 9*WHISPER_SAMPLE_RATE;
- const int64_t step_ms = (step_samples*1000)/WHISPER_SAMPLE_RATE;
-
- auto t_last = std::chrono::high_resolution_clock::now();
-
- talk_set_status("listening ...");
-
- while (g_running) {
-
- const auto t_now = std::chrono::high_resolution_clock::now();
- if (std::chrono::duration_cast(t_now - t_last).count() < step_ms) {
- {
- std::lock_guard lock(g_mutex);
- g_pcmf32.clear();
- }
- std::this_thread::sleep_for(std::chrono::milliseconds(10));
- continue;
- }
-
- talk_set_status("listening ...");
-
- {
- std::unique_lock lock(g_mutex);
-
- if (g_pcmf32.size() < step_samples) {
- lock.unlock();
-
- std::this_thread::sleep_for(std::chrono::milliseconds(10));
-
- continue;
- }
-
- pcmf32 = std::vector(g_pcmf32.end() - std::min((int64_t) g_pcmf32.size(), window_samples), g_pcmf32.end());
- }
-
- // VAD: if energy in during last second is above threshold, then skip
- {
- float energy_all = 0.0f;
- float energy_1s = 0.0f;
-
- for (size_t i = 0; i < pcmf32.size(); i++) {
- energy_all += fabsf(pcmf32[i]);
-
- if (i >= pcmf32.size() - WHISPER_SAMPLE_RATE) {
- energy_1s += fabsf(pcmf32[i]);
- }
- }
-
- energy_all /= pcmf32.size();
- energy_1s /= WHISPER_SAMPLE_RATE;
-
- if (energy_1s > 0.1f*energy_all && !g_force_speak) {
- std::this_thread::sleep_for(std::chrono::milliseconds(10));
- continue;
- }
- }
-
- talk_set_status("processing audio (whisper)...");
-
- t_last = t_now;
-
- if (!g_force_speak) {
- const auto t_start = std::chrono::high_resolution_clock::now();
-
- int ret = whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size());
- if (ret != 0) {
- printf("whisper_full() failed: %d\n", ret);
- break;
- }
-
- const auto t_end = std::chrono::high_resolution_clock::now();
-
- printf("whisper_full() returned %d in %f seconds\n", ret, std::chrono::duration(t_end - t_start).count());
- }
-
- {
- std::string text_heard;
-
- if (!g_force_speak) {
- const int n_segments = whisper_full_n_segments(ctx);
- for (int i = n_segments - 1; i < n_segments; ++i) {
- const char * text = whisper_full_get_segment_text(ctx, i);
-
- const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
- const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
-
- printf ("[%s --> %s] %s\n", to_timestamp(t0).c_str(), to_timestamp(t1).c_str(), text);
-
- text_heard += text;
- }
- }
-
- g_force_speak = false;
-
- // remove text between brackets using regex
- {
- std::regex re("\\[.*?\\]");
- text_heard = std::regex_replace(text_heard, re, "");
- }
-
- // remove text between brackets using regex
- {
- std::regex re("\\(.*?\\)");
- text_heard = std::regex_replace(text_heard, re, "");
- }
-
- // remove all characters, except for letters, numbers, punctuation and ':', '\'', '-', ' '
- text_heard = std::regex_replace(text_heard, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), "");
-
- // take first line
- text_heard = text_heard.substr(0, text_heard.find_first_of("\n"));
-
- // remove leading and trailing whitespace
- text_heard = std::regex_replace(text_heard, std::regex("^\\s+"), "");
- text_heard = std::regex_replace(text_heard, std::regex("\\s+$"), "");
-
- talk_set_status("'" + text_heard + "' - thinking how to respond (gpt-2) ...");
-
- const std::vector tokens = gpt2_tokenize(g_gpt2, text_heard.c_str());
-
- printf("whisper: number of tokens: %d, '%s'\n", (int) tokens.size(), text_heard.c_str());
-
- std::string text_to_speak;
- std::string prompt_base;
-
- {
- std::lock_guard lock(g_mutex);
- prompt_base = gpt2_get_prompt(g_gpt2);
- }
-
- if (tokens.size() > 0) {
- text_to_speak = gpt2_gen_text(g_gpt2, (prompt_base + text_heard + "\n").c_str(), 32);
- text_to_speak = std::regex_replace(text_to_speak, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), "");
- text_to_speak = text_to_speak.substr(0, text_to_speak.find_first_of("\n"));
-
- std::lock_guard lock(g_mutex);
-
- // remove first 2 lines of base prompt
- {
- const size_t pos = prompt_base.find_first_of("\n");
- if (pos != std::string::npos) {
- prompt_base = prompt_base.substr(pos + 1);
- }
- }
- {
- const size_t pos = prompt_base.find_first_of("\n");
- if (pos != std::string::npos) {
- prompt_base = prompt_base.substr(pos + 1);
- }
- }
- prompt_base += text_heard + "\n" + text_to_speak + "\n";
- } else {
- text_to_speak = gpt2_gen_text(g_gpt2, prompt_base.c_str(), 32);
- text_to_speak = std::regex_replace(text_to_speak, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), "");
- text_to_speak = text_to_speak.substr(0, text_to_speak.find_first_of("\n"));
-
- std::lock_guard lock(g_mutex);
-
- const size_t pos = prompt_base.find_first_of("\n");
- if (pos != std::string::npos) {
- prompt_base = prompt_base.substr(pos + 1);
- }
- prompt_base += text_to_speak + "\n";
- }
-
- printf("gpt-2: %s\n", text_to_speak.c_str());
-
- //printf("========================\n");
- //printf("gpt-2: prompt_base:\n'%s'\n", prompt_base.c_str());
- //printf("========================\n");
-
- {
- std::lock_guard lock(g_mutex);
- t_last = std::chrono::high_resolution_clock::now();
- g_text_to_speak = text_to_speak;
- g_pcmf32.clear();
- gpt2_set_prompt(g_gpt2, prompt_base.c_str());
- }
-
- talk_set_status("speaking ...");
- }
- }
-
- gpt2_free(g_gpt2);
-
- if (index < g_contexts.size()) {
- whisper_free(g_contexts[index]);
- g_contexts[index] = nullptr;
- }
-}
-
-EMSCRIPTEN_BINDINGS(talk) {
- emscripten::function("init", emscripten::optional_override([](const std::string & path_model) {
- for (size_t i = 0; i < g_contexts.size(); ++i) {
- if (g_contexts[i] == nullptr) {
- g_contexts[i] = whisper_init_from_file_with_params(path_model.c_str(), whisper_context_default_params());
- if (g_contexts[i] != nullptr) {
- g_running = true;
- if (g_worker.joinable()) {
- g_worker.join();
- }
- g_worker = std::thread([i]() {
- talk_main(i);
- });
-
- return i + 1;
- } else {
- return (size_t) 0;
- }
- }
- }
-
- return (size_t) 0;
- }));
-
- emscripten::function("free", emscripten::optional_override([](size_t index) {
- if (g_running) {
- g_running = false;
- }
- }));
-
- emscripten::function("set_audio", emscripten::optional_override([](size_t index, const emscripten::val & audio) {
- --index;
-
- if (index >= g_contexts.size()) {
- return -1;
- }
-
- if (g_contexts[index] == nullptr) {
- return -2;
- }
-
- {
- std::lock_guard lock(g_mutex);
- const int n = audio["length"].as<int>();
-
- emscripten::val heap = emscripten::val::module_property("HEAPU8");
- emscripten::val memory = heap["buffer"];
-
- g_pcmf32.resize(n);
-
- emscripten::val memoryView = audio["constructor"].new_(memory, reinterpret_cast<uintptr_t>(g_pcmf32.data()), n);
- memoryView.call<void>("set", audio);
- }
-
- return 0;
- }));
-
- emscripten::function("force_speak", emscripten::optional_override([](size_t index) {
- {
- std::lock_guard lock(g_mutex);
- g_force_speak = true;
- }
- }));
-
- emscripten::function("get_text_context", emscripten::optional_override([]() {
- std::string text_context;
-
- {
- std::lock_guard lock(g_mutex);
- text_context = gpt2_get_prompt(g_gpt2);
- }
-
- return text_context;
- }));
-
- emscripten::function("get_text_to_speak", emscripten::optional_override([]() {
- std::string text_to_speak;
-
- {
- std::lock_guard lock(g_mutex);
- text_to_speak = std::move(g_text_to_speak);
- }
-
- return text_to_speak;
- }));
-
- emscripten::function("get_status", emscripten::optional_override([]() {
- std::string status;
-
- {
- std::lock_guard lock(g_mutex);
- status = g_status_forced.empty() ? g_status : g_status_forced;
- }
-
- return status;
- }));
-
- emscripten::function("set_status", emscripten::optional_override([](const std::string & status) {
- {
- std::lock_guard lock(g_mutex);
- g_status_forced = status;
- }
- }));
-
- emscripten::function("set_prompt", emscripten::optional_override([](const std::string & prompt) {
- {
- std::lock_guard lock(g_mutex);
- gpt2_set_prompt(g_gpt2, prompt.c_str());
- }
- }));
-}
diff --git a/examples/talk.wasm/gpt-2.cpp b/examples/talk.wasm/gpt-2.cpp
deleted file mode 100644
index 22ec3354..00000000
--- a/examples/talk.wasm/gpt-2.cpp
+++ /dev/null
@@ -1,808 +0,0 @@
-#include "ggml.h"
-#include "common-ggml.h"
-
-#include "gpt-2.h"
-
-#include
-#include
-#include
-#include
-#include