diff --git a/README.md b/README.md index faa166d0..079c73b7 100644 --- a/README.md +++ b/README.md @@ -53,18 +53,6 @@ On Apple Silicon, the inference runs fully on the GPU via Metal: https://github.com/ggerganov/whisper.cpp/assets/1991296/c82e8f86-60dc-49f2-b048-d2fdbd6b5225 -Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm) - -## Implementation details - -- The core tensor operations are implemented in C ([ggml.h](ggml/include/ggml.h) / [ggml.c](ggml/src/ggml.c)) -- The transformer model and the high-level C-style API are implemented in C++ ([whisper.h](include/whisper.h) / [whisper.cpp](src/whisper.cpp)) -- Sample usage is demonstrated in [main.cpp](examples/main) -- Sample real-time audio transcription from the microphone is demonstrated in [stream.cpp](examples/stream) -- Various other examples are available in the [examples](examples) folder - -The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products. - ## Quick start First clone the repository: @@ -85,135 +73,26 @@ Then, download one of the Whisper [models](models/README.md) converted in [`ggml sh ./models/download-ggml-model.sh base.en ``` -Now build the [main](examples/main) example and transcribe an audio file like this: +Now build the [whisper-cli](examples/cli) example and transcribe an audio file like this: ```bash -# build the main example +# build the project cmake -B build cmake --build build --config Release # transcribe an audio file -./build/bin/main -f samples/jfk.wav +./build/bin/whisper-cli -f samples/jfk.wav ``` --- -For a quick demo, simply run `make base.en`: - -```text -$ make -j base.en - -cc -I. -O3 -std=c11 -pthread -DGGML_USE_ACCELERATE -c ggml.c -o ggml.o -c++ -I. 
-I./examples -O3 -std=c++11 -pthread -c whisper.cpp -o whisper.o -c++ -I. -I./examples -O3 -std=c++11 -pthread examples/main/main.cpp whisper.o ggml.o -o main -framework Accelerate -./main -h - -usage: ./main [options] file0.wav file1.wav ... - -options: - -h, --help [default] show this help message and exit - -t N, --threads N [4 ] number of threads to use during computation - -p N, --processors N [1 ] number of processors to use during computation - -ot N, --offset-t N [0 ] time offset in milliseconds - -on N, --offset-n N [0 ] segment index offset - -d N, --duration N [0 ] duration of audio to process in milliseconds - -mc N, --max-context N [-1 ] maximum number of text context tokens to store - -ml N, --max-len N [0 ] maximum segment length in characters - -sow, --split-on-word [false ] split on word rather than on token - -bo N, --best-of N [5 ] number of best candidates to keep - -bs N, --beam-size N [5 ] beam size for beam search - -wt N, --word-thold N [0.01 ] word timestamp probability threshold - -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail - -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail - -debug, --debug-mode [false ] enable debug mode (eg. 
dump log_mel) - -tr, --translate [false ] translate from source language to english - -di, --diarize [false ] stereo audio diarization - -tdrz, --tinydiarize [false ] enable tinydiarize (requires a tdrz model) - -nf, --no-fallback [false ] do not use temperature fallback while decoding - -otxt, --output-txt [false ] output result in a text file - -ovtt, --output-vtt [false ] output result in a vtt file - -osrt, --output-srt [false ] output result in a srt file - -olrc, --output-lrc [false ] output result in a lrc file - -owts, --output-words [false ] output script for generating karaoke video - -fp, --font-path [/System/Library/Fonts/Supplemental/Courier New Bold.ttf] path to a monospace font for karaoke video - -ocsv, --output-csv [false ] output result in a CSV file - -oj, --output-json [false ] output result in a JSON file - -ojf, --output-json-full [false ] include more information in the JSON file - -of FNAME, --output-file FNAME [ ] output file path (without file extension) - -ps, --print-special [false ] print special tokens - -pc, --print-colors [false ] print colors - -pp, --print-progress [false ] print progress - -nt, --no-timestamps [false ] do not print timestamps - -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect) - -dl, --detect-language [false ] exit after automatically detecting language - --prompt PROMPT [ ] initial prompt - -m FNAME, --model FNAME [models/ggml-base.en.bin] model path - -f FNAME, --file FNAME [ ] input WAV file path - -oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference - -ls, --log-score [false ] log best decoder scores of tokens - -ng, --no-gpu [false ] disable GPU - - -sh ./models/download-ggml-model.sh base.en -Downloading ggml model base.en ... -ggml-base.en.bin 100%[========================>] 141.11M 6.34MB/s in 24s -Done! 
Model 'base.en' saved in 'models/ggml-base.en.bin' -You can now use it like this: - - $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav - - -=============================================== -Running base.en on all samples in ./samples ... -=============================================== - ----------------------------------------------- -[+] Running base.en on samples/jfk.wav ... (run 'ffplay samples/jfk.wav' to listen) ----------------------------------------------- - -whisper_init_from_file: loading model from 'models/ggml-base.en.bin' -whisper_model_load: loading model -whisper_model_load: n_vocab = 51864 -whisper_model_load: n_audio_ctx = 1500 -whisper_model_load: n_audio_state = 512 -whisper_model_load: n_audio_head = 8 -whisper_model_load: n_audio_layer = 6 -whisper_model_load: n_text_ctx = 448 -whisper_model_load: n_text_state = 512 -whisper_model_load: n_text_head = 8 -whisper_model_load: n_text_layer = 6 -whisper_model_load: n_mels = 80 -whisper_model_load: f16 = 1 -whisper_model_load: type = 2 -whisper_model_load: mem required = 215.00 MB (+ 6.00 MB per decoder) -whisper_model_load: kv self size = 5.25 MB -whisper_model_load: kv cross size = 17.58 MB -whisper_model_load: adding 1607 extra tokens -whisper_model_load: model ctx = 140.60 MB -whisper_model_load: model size = 140.54 MB - -system_info: n_threads = 4 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | - -main: processing 'samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ... - - -[00:00:00.000 --> 00:00:11.000] And so my fellow Americans, ask not what your country can do for you, ask what you can do for your country. 
- - -whisper_print_timings: fallbacks = 0 p / 0 h -whisper_print_timings: load time = 113.81 ms -whisper_print_timings: mel time = 15.40 ms -whisper_print_timings: sample time = 11.58 ms / 27 runs ( 0.43 ms per run) -whisper_print_timings: encode time = 266.60 ms / 1 runs ( 266.60 ms per run) -whisper_print_timings: decode time = 66.11 ms / 27 runs ( 2.45 ms per run) -whisper_print_timings: total time = 476.31 ms -``` +For a quick demo, simply run `make base.en`. The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`. -For detailed usage instructions, run: `./main -h` +For detailed usage instructions, run: `./build/bin/whisper-cli -h` -Note that the [main](examples/main) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool. +Note that the [whisper-cli](examples/cli) example currently runs only with 16-bit WAV files, so make sure to convert your input before running the tool. For example, you can use `ffmpeg` like this: ```bash @@ -271,7 +150,7 @@ cmake --build build --config Release ./build/bin/quantize models/ggml-base.en.bin models/ggml-base.en-q5_0.bin q5_0 # run the examples as usual, specifying the quantized model file -./build/bin/main -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav +./build/bin/whisper-cli -m models/ggml-base.en-q5_0.bin ./samples/gb0.wav ``` ## Core ML support @@ -313,7 +192,7 @@ speed-up - more than x3 faster compared with CPU-only execution. Here are the in - Run the examples as usual. For example: ```text - $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav + $ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav ... @@ -397,7 +276,7 @@ This can result in significant speedup in encoder performance. Here are the inst - Run the examples as usual. 
For example: ```text - $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav + $ ./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/jfk.wav ... @@ -473,7 +352,7 @@ cmake --build build -j --config Release Run the inference examples as usual, for example: ``` -./build/bin/main -f samples/jfk.wav -m models/ggml-base.en.bin -t 8 +./build/bin/whisper-cli -f samples/jfk.wav -m models/ggml-base.en.bin -t 8 ``` *Notes:* @@ -481,38 +360,6 @@ Run the inference examples as usual, for example: - If you have trouble with Ascend NPU device, please create a issue with **[CANN]** prefix/tag. - If you run successfully with your Ascend NPU device, please help update the table `Verified devices`. -## Docker - -### Prerequisites - -- Docker must be installed and running on your system. -- Create a folder to store big models & intermediate files (ex. /whisper/models) - -### Images - -We have two Docker images available for this project: - -1. `ghcr.io/ggerganov/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`) -2. `ghcr.io/ggerganov/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`) - -### Usage - -```shell -# download model and persist it in a local folder -docker run -it --rm \ - -v path/to/models:/models \ - whisper.cpp:main "./models/download-ggml-model.sh base /models" -# transcribe an audio file -docker run -it --rm \ - -v path/to/models:/models \ - -v path/to/audios:/audios \ - whisper.cpp:main "./main -m /models/ggml-base.bin -f /audios/jfk.wav" -# transcribe an audio file in samples folder -docker run -it --rm \ - -v path/to/models:/models \ - whisper.cpp:main "./main -m /models/ggml-base.bin -f ./samples/jfk.wav" -``` - ## Installing with Conan You can install pre-built binaries for whisper.cpp or build it from source using [Conan](https://conan.io/). 
Use the following command: @@ -527,89 +374,6 @@ For detailed instructions on how to use Conan, please refer to the [Conan docume - Inference only -## Another example - -Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg) -in about half a minute on a MacBook M1 Pro, using `medium.en` model: - -
- Expand to see the result - -```text -$ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8 - -whisper_init_from_file: loading model from 'models/ggml-medium.en.bin' -whisper_model_load: loading model -whisper_model_load: n_vocab = 51864 -whisper_model_load: n_audio_ctx = 1500 -whisper_model_load: n_audio_state = 1024 -whisper_model_load: n_audio_head = 16 -whisper_model_load: n_audio_layer = 24 -whisper_model_load: n_text_ctx = 448 -whisper_model_load: n_text_state = 1024 -whisper_model_load: n_text_head = 16 -whisper_model_load: n_text_layer = 24 -whisper_model_load: n_mels = 80 -whisper_model_load: f16 = 1 -whisper_model_load: type = 4 -whisper_model_load: mem required = 1720.00 MB (+ 43.00 MB per decoder) -whisper_model_load: kv self size = 42.00 MB -whisper_model_load: kv cross size = 140.62 MB -whisper_model_load: adding 1607 extra tokens -whisper_model_load: model ctx = 1462.35 MB -whisper_model_load: model size = 1462.12 MB - -system_info: n_threads = 8 / 10 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | - -main: processing 'samples/gb1.wav' (3179750 samples, 198.7 sec), 8 threads, 1 processors, lang = en, task = transcribe, timestamps = 1 ... - - -[00:00:00.000 --> 00:00:08.000] My fellow Americans, this day has brought terrible news and great sadness to our country. -[00:00:08.000 --> 00:00:17.000] At nine o'clock this morning, Mission Control in Houston lost contact with our Space Shuttle Columbia. -[00:00:17.000 --> 00:00:23.000] A short time later, debris was seen falling from the skies above Texas. -[00:00:23.000 --> 00:00:29.000] The Columbia's lost. There are no survivors. -[00:00:29.000 --> 00:00:32.000] On board was a crew of seven. -[00:00:32.000 --> 00:00:39.000] Colonel Rick Husband, Lieutenant Colonel Michael Anderson, Commander Laurel Clark, -[00:00:39.000 --> 00:00:48.000] Captain David Brown, Commander William McCool, Dr. 
Kultna Shavla, and Ilan Ramon, -[00:00:48.000 --> 00:00:52.000] a colonel in the Israeli Air Force. -[00:00:52.000 --> 00:00:58.000] These men and women assumed great risk in the service to all humanity. -[00:00:58.000 --> 00:01:03.000] In an age when space flight has come to seem almost routine, -[00:01:03.000 --> 00:01:07.000] it is easy to overlook the dangers of travel by rocket -[00:01:07.000 --> 00:01:12.000] and the difficulties of navigating the fierce outer atmosphere of the Earth. -[00:01:12.000 --> 00:01:18.000] These astronauts knew the dangers, and they faced them willingly, -[00:01:18.000 --> 00:01:23.000] knowing they had a high and noble purpose in life. -[00:01:23.000 --> 00:01:31.000] Because of their courage and daring and idealism, we will miss them all the more. -[00:01:31.000 --> 00:01:36.000] All Americans today are thinking as well of the families of these men and women -[00:01:36.000 --> 00:01:40.000] who have been given this sudden shock and grief. -[00:01:40.000 --> 00:01:45.000] You're not alone. Our entire nation grieves with you, -[00:01:45.000 --> 00:01:52.000] and those you love will always have the respect and gratitude of this country. -[00:01:52.000 --> 00:01:56.000] The cause in which they died will continue. -[00:01:56.000 --> 00:02:04.000] Mankind is led into the darkness beyond our world by the inspiration of discovery -[00:02:04.000 --> 00:02:11.000] and the longing to understand. Our journey into space will go on. -[00:02:11.000 --> 00:02:16.000] In the skies today, we saw destruction and tragedy. -[00:02:16.000 --> 00:02:22.000] Yet farther than we can see, there is comfort and hope. -[00:02:22.000 --> 00:02:29.000] In the words of the prophet Isaiah, "Lift your eyes and look to the heavens -[00:02:29.000 --> 00:02:35.000] who created all these. He who brings out the starry hosts one by one -[00:02:35.000 --> 00:02:39.000] and calls them each by name." 
-[00:02:39.000 --> 00:02:46.000] Because of His great power and mighty strength, not one of them is missing. -[00:02:46.000 --> 00:02:55.000] The same Creator who names the stars also knows the names of the seven souls we mourn today. -[00:02:55.000 --> 00:03:01.000] The crew of the shuttle Columbia did not return safely to earth, -[00:03:01.000 --> 00:03:05.000] yet we can pray that all are safely home. -[00:03:05.000 --> 00:03:13.000] May God bless the grieving families, and may God continue to bless America. -[00:03:13.000 --> 00:03:19.000] [Silence] - - -whisper_print_timings: fallbacks = 1 p / 0 h -whisper_print_timings: load time = 569.03 ms -whisper_print_timings: mel time = 146.85 ms -whisper_print_timings: sample time = 238.66 ms / 553 runs ( 0.43 ms per run) -whisper_print_timings: encode time = 18665.10 ms / 9 runs ( 2073.90 ms per run) -whisper_print_timings: decode time = 13090.93 ms / 549 runs ( 23.85 ms per run) -whisper_print_timings: total time = 32733.52 ms -``` - -
- ## Real-time audio input example This is a naive example of performing real-time inference on audio from your microphone. @@ -630,7 +394,7 @@ Adding the `--print-colors` argument will print the transcribed text using an ex to highlight words with high or low confidence: ```bash -./main -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors +./build/bin/whisper-cli -m models/ggml-base.en.bin -f samples/gb0.wav --print-colors ``` image @@ -640,7 +404,7 @@ to highlight words with high or low confidence: For example, to limit the line length to a maximum of 16 characters, simply add `-ml 16`: ```text -$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16 +$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 16 whisper_model_load: loading model from './models/ggml-base.en.bin' ... @@ -664,7 +428,7 @@ main: processing './samples/jfk.wav' (176000 samples, 11.0 sec), 4 threads, 1 pr The `--max-len` argument can be used to obtain word-level timestamps. Simply use `-ml 1`: ```text -$ ./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1 +$ ./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -ml 1 whisper_model_load: loading model from './models/ggml-base.en.bin' ... @@ -711,7 +475,7 @@ Sample usage: ./models/download-ggml-model.sh small.en-tdrz # run as usual, adding the "-tdrz" command-line argument -./main -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz +./build/bin/whisper-cli -f ./samples/a13.wav -m ./models/ggml-small.en-tdrz.bin -tdrz ... main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 processors, lang = en, task = transcribe, tdrz = 1, timestamps = 1 ... ... 
@@ -728,14 +492,14 @@ main: processing './samples/a13.wav' (480000 samples, 30.0 sec), 4 threads, 1 pr ## Karaoke-style movie generation (experimental) -The [main](examples/main) example provides support for output of karaoke-style movies, where the +The [whisper-cli](examples/cli) example provides support for output of karaoke-style movies, where the currently pronounced word is highlighted. Use the `-wts` argument and run the generated bash script. This requires to have `ffmpeg` installed. Here are a few _"typical"_ examples: ```bash -./main -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts +./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/jfk.wav -owts source ./samples/jfk.wav.wts ffplay ./samples/jfk.wav.mp4 ``` @@ -745,7 +509,7 @@ https://user-images.githubusercontent.com/1991296/199337465-dbee4b5e-9aeb-48a3-b --- ```bash -./main -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts +./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/mm0.wav -owts source ./samples/mm0.wav.wts ffplay ./samples/mm0.wav.mp4 ``` @@ -755,7 +519,7 @@ https://user-images.githubusercontent.com/1991296/199337504-cc8fd233-0cb7-4920-9 --- ```bash -./main -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts +./build/bin/whisper-cli -m ./models/ggml-base.en.bin -f ./samples/gb0.wav -owts source ./samples/gb0.wav.wts ffplay ./samples/gb0.wav.mp4 ``` @@ -780,7 +544,7 @@ https://user-images.githubusercontent.com/1991296/223206245-2d36d903-cf8e-4f09-8 ## Benchmarks In order to have an objective comparison of the performance of the inference across different system configurations, -use the [bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it +use the [whisper-bench](examples/bench) tool. The tool simply runs the Encoder part of the model and prints how much time it took to execute it. 
The results are summarized in the following Github issue: [Benchmark results](https://github.com/ggerganov/whisper.cpp/issues/89) @@ -843,13 +607,12 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch | Example | Web | Description | | --------------------------------------------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| [main](examples/main) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper | -| [bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine | -| [stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture | -| [command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic | -| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess | -| [talk](examples/talk) | [talk.wasm](examples/talk.wasm) | Talk with a GPT-2 bot | -| [talk-llama](examples/talk-llama) | | Talk with a LLaMA bot | +| [whisper-cli](examples/cli) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper | +| [whisper-bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine | +| [whisper-stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture | +| [whisper-command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic | +| [whisper-server](examples/server) | | HTTP transcription server with OAI-like API | +| [whisper-talk-llama](examples/talk-llama) | | Talk with a LLaMA bot | | [whisper.objc](examples/whisper.objc) | | iOS 
mobile application using whisper.cpp | | [whisper.swiftui](examples/whisper.swiftui) | | SwiftUI iOS / macOS application using whisper.cpp | | [whisper.android](examples/whisper.android) | | Android mobile application using whisper.cpp | @@ -857,7 +620,7 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch | [generate-karaoke.sh](examples/generate-karaoke.sh) | | Helper script to easily [generate a karaoke video](https://youtu.be/uj7hVta4blM) of raw audio capture | | [livestream.sh](examples/livestream.sh) | | [Livestream audio transcription](https://github.com/ggerganov/whisper.cpp/issues/185) | | [yt-wsp.sh](examples/yt-wsp.sh) | | Download + transcribe and/or translate any VOD [(original)](https://gist.github.com/DaniruKun/96f763ec1a037cc92fe1a059b643b818) | -| [server](examples/server) | | HTTP transcription server with OAI-like API | +| [wchess](examples/wchess) | [wchess.wasm](examples/wchess) | Voice-controlled chess | ## [Discussions](https://github.com/ggerganov/whisper.cpp/discussions) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index addfa1d4..3e03c95e 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -97,52 +97,33 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}) if (EMSCRIPTEN) add_subdirectory(whisper.wasm) - set_target_properties(libmain PROPERTIES FOLDER "libs") add_subdirectory(stream.wasm) - set_target_properties(libstream PROPERTIES FOLDER "libs") add_subdirectory(command.wasm) - set_target_properties(libcommand PROPERTIES FOLDER "libs") - #add_subdirectory(talk.wasm) - #set_target_properties(libtalk PROPERTIES FOLDER "libs") add_subdirectory(bench.wasm) - set_target_properties(libbench PROPERTIES FOLDER "libs") elseif(CMAKE_JS_VERSION) add_subdirectory(addon.node) - set_target_properties(addon.node PROPERTIES FOLDER "examples") else() - add_subdirectory(main) - set_target_properties(main PROPERTIES FOLDER "examples") + add_subdirectory(cli) if (WHISPER_SDL2) 
add_subdirectory(stream) - set_target_properties(stream PROPERTIES FOLDER "examples") endif (WHISPER_SDL2) add_subdirectory(server) - set_target_properties(server PROPERTIES FOLDER "examples") if (WHISPER_SDL2) add_subdirectory(command) - set_target_properties(command PROPERTIES FOLDER "examples") endif (WHISPER_SDL2) add_subdirectory(bench) - set_target_properties(bench PROPERTIES FOLDER "examples") add_subdirectory(quantize) - set_target_properties(quantize PROPERTIES FOLDER "examples") if (WHISPER_SDL2) - # TODO: disabled until update - # https://github.com/ggerganov/whisper.cpp/issues/1818 - #add_subdirectory(talk) - #set_target_properties(talk PROPERTIES FOLDER "examples") add_subdirectory(talk-llama) - set_target_properties(talk-llama PROPERTIES FOLDER "examples") add_subdirectory(lsp) - set_target_properties(lsp PROPERTIES FOLDER "examples") if (GGML_SYCL) add_subdirectory(sycl) - set_target_properties(ls-sycl-device PROPERTIES FOLDER "examples") endif() endif (WHISPER_SDL2) endif() if (WHISPER_SDL2) add_subdirectory(wchess) - set_target_properties(wchess PROPERTIES FOLDER "examples") endif (WHISPER_SDL2) + +add_subdirectory(deprecation-warning) diff --git a/examples/bench/CMakeLists.txt b/examples/bench/CMakeLists.txt index f8a72ffd..f255f2dc 100644 --- a/examples/bench/CMakeLists.txt +++ b/examples/bench/CMakeLists.txt @@ -1,6 +1,8 @@ -set(TARGET bench) +set(TARGET whisper-bench) add_executable(${TARGET} bench.cpp) include(DefaultTargetOptions) target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT}) + +install(TARGETS ${TARGET} RUNTIME) diff --git a/examples/bench/README.md b/examples/bench/README.md index 5b42cb4d..cf58665a 100644 --- a/examples/bench/README.md +++ b/examples/bench/README.md @@ -1,4 +1,4 @@ -# bench +# whisper.cpp/examples/bench A very basic tool for benchmarking the inference performance on your device. The tool simply runs the Encoder part of the transformer on some random audio data and records the execution time. 
This way we can have an objective comparison @@ -7,11 +7,8 @@ of the performance of the model for various setups. Benchmark results are tracked in the following Github issue: https://github.com/ggerganov/whisper.cpp/issues/89 ```bash -# build the bench tool -$ make bench - -# run it on the small.en model using 4 threads -$ ./bench -m ./models/ggml-small.en.bin -t 4 +# run the bench tool on the small.en model using 4 threads +$ ./build/bin/whisper-bench -m ./models/ggml-small.en.bin -t 4 whisper_model_load: loading model from './models/ggml-small.en.bin' whisper_model_load: n_vocab = 51864 diff --git a/examples/main/CMakeLists.txt b/examples/cli/CMakeLists.txt similarity index 58% rename from examples/main/CMakeLists.txt rename to examples/cli/CMakeLists.txt index 1e66e4b5..3a73776c 100644 --- a/examples/main/CMakeLists.txt +++ b/examples/cli/CMakeLists.txt @@ -1,6 +1,8 @@ -set(TARGET main) -add_executable(${TARGET} main.cpp) +set(TARGET whisper-cli) +add_executable(${TARGET} cli.cpp) include(DefaultTargetOptions) target_link_libraries(${TARGET} PRIVATE common whisper ${FFMPEG_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) + +install(TARGETS ${TARGET} RUNTIME) diff --git a/examples/main/README.md b/examples/cli/README.md similarity index 75% rename from examples/main/README.md rename to examples/cli/README.md index 2d868810..1847134e 100644 --- a/examples/main/README.md +++ b/examples/cli/README.md @@ -1,12 +1,12 @@ -# main +# whisper.cpp/examples/cli This is the main example demonstrating most of the functionality of the Whisper model. It can be used as a reference for using the `whisper.cpp` library in other projects. ``` -./main -h +./build/bin/whisper-cli -h -usage: ./main [options] file0.wav file1.wav ... +usage: ./build/bin/whisper-cli [options] file0.wav file1.wav ... 
options: -h, --help [default] show this help message and exit @@ -20,9 +20,12 @@ options: -sow, --split-on-word [false ] split on word rather than on token -bo N, --best-of N [5 ] number of best candidates to keep -bs N, --beam-size N [5 ] beam size for beam search + -ac N, --audio-ctx N [0 ] audio context size (0 - all) -wt N, --word-thold N [0.01 ] word timestamp probability threshold -et N, --entropy-thold N [2.40 ] entropy threshold for decoder fail -lpt N, --logprob-thold N [-1.00 ] log probability threshold for decoder fail + -tp, --temperature N [0.00 ] The sampling temperature, between 0 and 1 + -tpi, --temperature-inc N [0.20 ] The increment of temperature, between 0 and 1 -debug, --debug-mode [false ] enable debug mode (eg. dump log_mel) -tr, --translate [false ] translate from source language to english -di, --diarize [false ] stereo audio diarization @@ -38,16 +41,23 @@ options: -oj, --output-json [false ] output result in a JSON file -ojf, --output-json-full [false ] include more information in the JSON file -of FNAME, --output-file FNAME [ ] output file path (without file extension) + -np, --no-prints [false ] do not print anything other than the results -ps, --print-special [false ] print special tokens -pc, --print-colors [false ] print colors -pp, --print-progress [false ] print progress -nt, --no-timestamps [false ] do not print timestamps -l LANG, --language LANG [en ] spoken language ('auto' for auto-detect) -dl, --detect-language [false ] exit after automatically detecting language - --prompt PROMPT [ ] initial prompt + --prompt PROMPT [ ] initial prompt (max n_text_ctx/2 tokens) -m FNAME, --model FNAME [models/ggml-base.en.bin] model path -f FNAME, --file FNAME [ ] input WAV file path -oved D, --ov-e-device DNAME [CPU ] the OpenVINO device used for encode inference + -dtw MODEL --dtw MODEL [ ] compute token-level timestamps -ls, --log-score [false ] log best decoder scores of tokens -ng, --no-gpu [false ] disable GPU + -fa, --flash-attn [false 
] flash attention + --suppress-regex REGEX [ ] regular expression matching tokens to suppress + --grammar GRAMMAR [ ] GBNF grammar to guide decoding + --grammar-rule RULE [ ] top-level GBNF grammar rule name + --grammar-penalty N [100.0 ] scales down logits of nongrammar tokens ``` diff --git a/examples/main/main.cpp b/examples/cli/cli.cpp similarity index 100% rename from examples/main/main.cpp rename to examples/cli/cli.cpp diff --git a/examples/command/CMakeLists.txt b/examples/command/CMakeLists.txt index 40f278c1..c929a6f5 100644 --- a/examples/command/CMakeLists.txt +++ b/examples/command/CMakeLists.txt @@ -1,9 +1,10 @@ if (WHISPER_SDL2) - # command - set(TARGET command) + set(TARGET whisper-command) add_executable(${TARGET} command.cpp) include(DefaultTargetOptions) target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT}) + + install(TARGETS ${TARGET} RUNTIME) endif () diff --git a/examples/command/README.md b/examples/command/README.md index 46b14e93..7eb2bb60 100644 --- a/examples/command/README.md +++ b/examples/command/README.md @@ -1,14 +1,14 @@ -# command +# whisper.cpp/examples/command This is a basic Voice Assistant example that accepts voice commands from the microphone. More info is available in [issue #171](https://github.com/ggerganov/whisper.cpp/issues/171). 
```bash # Run with default arguments and small model -./command -m ./models/ggml-small.en.bin -t 8 +./whisper-command -m ./models/ggml-small.en.bin -t 8 # On Raspberry Pi, use tiny or base models + "-ac 768" for better performance -./command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0 +./whisper-command -m ./models/ggml-tiny.en.bin -ac 768 -t 3 -c 0 ``` https://user-images.githubusercontent.com/1991296/204038393-2f846eae-c255-4099-a76d-5735c25c49da.mp4 @@ -23,10 +23,10 @@ Initial tests show that this approach might be extremely efficient in terms of p ```bash # Run in guided mode, the list of allowed commands is in commands.txt -./command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt +./whisper-command -m ./models/ggml-base.en.bin -cmd ./examples/command/commands.txt # On Raspberry Pi, in guided mode you can use "-ac 128" for extra performance -./command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0 +./whisper-command -m ./models/ggml-tiny.en.bin -cmd ./examples/command/commands.txt -ac 128 -t 3 -c 0 ``` https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9b8b-aeeb76bee969.mp4 @@ -34,7 +34,7 @@ https://user-images.githubusercontent.com/1991296/207435352-8fc4ed3f-bde5-4555-9 ## Building -The `command` tool depends on SDL2 library to capture audio from the microphone. You can build it like this: +The `whisper-command` tool depends on SDL2 library to capture audio from the microphone. 
You can build it like this: ```bash # Install SDL2 @@ -47,5 +47,6 @@ sudo dnf install SDL2 SDL2-devel # Install SDL2 on Mac OS brew install sdl2 -make command +cmake -B build -DWHISPER_SDL2=ON +cmake --build build --config Release ``` diff --git a/examples/deprecation-warning/CMakeLists.txt b/examples/deprecation-warning/CMakeLists.txt new file mode 100644 index 00000000..f845e6cc --- /dev/null +++ b/examples/deprecation-warning/CMakeLists.txt @@ -0,0 +1,4 @@ +add_executable(main ./deprecation-warning.cpp) +add_executable(bench ./deprecation-warning.cpp) +add_executable(stream ./deprecation-warning.cpp) +add_executable(command ./deprecation-warning.cpp) diff --git a/examples/deprecation-warning/README.md b/examples/deprecation-warning/README.md new file mode 100644 index 00000000..e07e134b --- /dev/null +++ b/examples/deprecation-warning/README.md @@ -0,0 +1,17 @@ +# Migration notice for binary filenames + +> [!IMPORTANT] +[2024 Dec 20] Binaries have been renamed w/ a `whisper-` prefix. `main` is now `whisper-cli`, `server` is `whisper-server`, etc (https://github.com/ggerganov/whisper.cpp/pull/2648) + +This migration was important, but it is a breaking change that may not always be immediately obvious to users. + +Please update all scripts and workflows to use the new binary names. + +| Old Filename | New Filename | +| ---- | ---- | +| main | whisper-cli | +| bench | whisper-bench | +| stream | whisper-stream | +| command | whisper-command | +| server | whisper-server | +| talk-llama | whisper-talk-llama | diff --git a/examples/deprecation-warning/deprecation-warning.cpp b/examples/deprecation-warning/deprecation-warning.cpp new file mode 100644 index 00000000..7247f0e0 --- /dev/null +++ b/examples/deprecation-warning/deprecation-warning.cpp @@ -0,0 +1,34 @@ +// Warns users that this filename was deprecated, and provides a link for more information. 
+ +#include <cstdio> +#include <string> + +// Main +int main(int argc, char** argv) { + std::string filename = "main"; + if (argc >= 1) { + filename = argv[0]; + } + + // Get only the program name from the full path + size_t pos = filename.find_last_of("/\\"); + if (pos != std::string::npos) { + filename = filename.substr(pos+1); + } + + // Append "whisper-" to the beginning of filename to get the replacement filename + std::string replacement_filename = "whisper-" + filename; + + // The exception is if the filename is "main", then our replacement filename is "whisper-cli" + if (filename == "main") { + replacement_filename = "whisper-cli"; + } + + fprintf(stdout, "\n"); + fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str()); + fprintf(stdout, " Please use '%s' instead.\n", replacement_filename.c_str()); + fprintf(stdout, " See https://github.com/ggerganov/whisper.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n"); + fprintf(stdout, "\n"); + + return EXIT_FAILURE; +} diff --git a/examples/generate-karaoke.sh b/examples/generate-karaoke.sh index 7062c9a0..6c32970c 100755 --- a/examples/generate-karaoke.sh +++ b/examples/generate-karaoke.sh @@ -11,7 +11,7 @@ # Press Ctrl+C to stop recording # -executable="./main" +executable="./build/bin/whisper-cli" model="base.en" model_path="models/ggml-$model.bin" @@ -46,7 +46,7 @@ ffmpeg -y -i ./rec.wav -ar 16000 -ac 1 -c:a pcm_s16le ./rec16.wav > /dev/null 2> # run Whisper echo "Processing ..." -./main -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1 +${executable} -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1 # generate Karaoke video echo "Generating video ..." diff --git a/examples/livestream.sh b/examples/livestream.sh index 7c044199..14f6cf80 100755 --- a/examples/livestream.sh +++ b/examples/livestream.sh @@ -14,7 +14,7 @@ model="base.en" check_requirements() { - if ! 
command -v ./build/bin/whisper-cli &>/dev/null; then echo "whisper.cpp main executable is required (make)" exit 1 fi @@ -100,7 +100,7 @@ while [ $running -eq 1 ]; do err=$(cat /tmp/whisper-live.err | wc -l) done - ./main -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1 + ./build/bin/whisper-cli -t 8 -m ./models/ggml-${model}.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1 while [ $SECONDS -lt $((($i+1)*$step_s)) ]; do sleep 1 @@ -109,4 +109,4 @@ while [ $running -eq 1 ]; do done killall -v ffmpeg -killall -v main +killall -v whisper-cli diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt index 96dd97f7..4667c307 100644 --- a/examples/server/CMakeLists.txt +++ b/examples/server/CMakeLists.txt @@ -1,4 +1,4 @@ -set(TARGET server) +set(TARGET whisper-server) add_executable(${TARGET} server.cpp httplib.h) include(DefaultTargetOptions) @@ -8,3 +8,5 @@ target_link_libraries(${TARGET} PRIVATE common json_cpp whisper ${CMAKE_THREAD_L if (WIN32) target_link_libraries(${TARGET} PRIVATE ws2_32) endif() + +install(TARGETS ${TARGET} RUNTIME) diff --git a/examples/server/README.md b/examples/server/README.md index 596fd769..4f2e4036 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -1,4 +1,4 @@ -# whisper.cpp http server +# whisper.cpp/examples/server Simple http server. WAV Files are passed to the inference model via http requests. 
@@ -7,9 +7,9 @@ https://github.com/ggerganov/whisper.cpp/assets/1991296/e983ee53-8741-4eb5-9048- ## Usage ``` -./server -h +./build/bin/whisper-server -h -usage: ./bin/server [options] +usage: ./build/bin/whisper-server [options] options: -h, --help [default] show this help message and exit diff --git a/examples/server/server.cpp b/examples/server/server.cpp index e7d7c8ae..799eabca 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -677,7 +677,17 @@ int main(int argc, char ** argv) { if (sparams.ffmpeg_converter) { // if file is not wav, convert to wav // write to temporary file - const std::string temp_filename_base = std::tmpnam(nullptr); + //const std::string temp_filename_base = std::tmpnam(nullptr); // note: this is unsafe + const std::string temp_filename_base = []() { + char temp_filename_template[] = "/tmp/tempfileXXXXXX"; + int fd = mkstemp(temp_filename_template); + if (fd == -1) { + perror("mkstemp"); + return std::string(); + } + close(fd); + return std::string(temp_filename_template); + }(); const std::string temp_filename = temp_filename_base + ".wav"; std::ofstream temp_file{temp_filename, std::ios::binary}; temp_file << audio_file.content; @@ -711,7 +721,6 @@ int main(int argc, char ** argv) { } } - printf("Successfully loaded %s\n", filename.c_str()); // print system information diff --git a/examples/stream/CMakeLists.txt b/examples/stream/CMakeLists.txt index 312d52c6..a6104839 100644 --- a/examples/stream/CMakeLists.txt +++ b/examples/stream/CMakeLists.txt @@ -1,9 +1,10 @@ if (WHISPER_SDL2) - # stream - set(TARGET stream) + set(TARGET whisper-stream) add_executable(${TARGET} stream.cpp) include(DefaultTargetOptions) target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT}) + + install(TARGETS ${TARGET} RUNTIME) endif () diff --git a/examples/stream/README.md b/examples/stream/README.md index a8bdf71d..f07cfb89 100644 --- a/examples/stream/README.md +++ b/examples/stream/README.md @@ 
-1,11 +1,11 @@ -# stream +# whisper.cpp/examples/stream This is a naive example of performing real-time inference on audio from your microphone. -The `stream` tool samples the audio every half a second and runs the transcription continously. +The `whisper-stream` tool samples the audio every half a second and runs the transcription continuously. More info is available in [issue #10](https://github.com/ggerganov/whisper.cpp/issues/10). ```bash -./build/bin/stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000 +./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 8 --step 500 --length 5000 ``` https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4 @@ -15,7 +15,7 @@ https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a Setting the `--step` argument to `0` enables the sliding window mode: ```bash - ./build/bin/stream -m ./models/ggml-base.en.bin -t 6 --step 0 --length 30000 -vth 0.6 + ./build/bin/whisper-stream -m ./models/ggml-base.en.bin -t 6 --step 0 --length 30000 -vth 0.6 ``` In this mode, the tool will transcribe only after some speech activity is detected. A very @@ -27,7 +27,7 @@ a transcription block that is suitable for parsing. ## Building -The `stream` tool depends on SDL2 library to capture audio from the microphone. You can build it like this: +The `whisper-stream` tool depends on SDL2 library to capture audio from the microphone. 
You can build it like this: ```bash # Install SDL2 @@ -43,7 +43,7 @@ brew install sdl2 cmake -B build -DWHISPER_SDL2=ON cmake --build build --config Release -./build/bin/stream +./build/bin/whisper-stream ``` ## Web version diff --git a/examples/talk-llama/CMakeLists.txt b/examples/talk-llama/CMakeLists.txt index 56b4d0d7..ce51660c 100644 --- a/examples/talk-llama/CMakeLists.txt +++ b/examples/talk-llama/CMakeLists.txt @@ -1,6 +1,5 @@ if (WHISPER_SDL2) - # talk-llama - set(TARGET talk-llama) + set(TARGET whisper-talk-llama) add_executable(${TARGET} talk-llama.cpp llama.cpp llama-vocab.cpp diff --git a/examples/talk-llama/README.md b/examples/talk-llama/README.md index f8f55440..7f9fa6df 100644 --- a/examples/talk-llama/README.md +++ b/examples/talk-llama/README.md @@ -1,4 +1,4 @@ -# talk-llama +# whisper.cpp/examples/talk-llama Talk with an LLaMA AI in your terminal @@ -12,7 +12,7 @@ https://github.com/ggerganov/whisper.cpp/assets/1991296/d97a3788-bf2a-4756-9a43- ## Building -The `talk-llama` tool depends on SDL2 library to capture audio from the microphone. You can build it like this: +The `whisper-talk-llama` tool depends on SDL2 library to capture audio from the microphone. You can build it like this: ```bash # Install SDL2 @@ -25,11 +25,12 @@ sudo dnf install SDL2 SDL2-devel # Install SDL2 on Mac OS brew install sdl2 -# Build the "talk-llama" executable -make talk-llama +# Build the "whisper-talk-llama" executable +cmake -B build -S . -DWHISPER_SDL2=ON +cmake --build build --config Release # Run it -./talk-llama -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8 +./build/bin/whisper-talk-llama -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8 ``` - The `-mw` argument specifies the Whisper model that you would like to use. 
Recommended `base` or `small` for real-time experience @@ -37,16 +38,16 @@ make talk-llama ## Session -The `talk-llama` tool supports session management to enable more coherent and continuous conversations. By maintaining context from previous interactions, it can better understand and respond to user requests in a more natural way. +The `whisper-talk-llama` tool supports session management to enable more coherent and continuous conversations. By maintaining context from previous interactions, it can better understand and respond to user requests in a more natural way. -To enable session support, use the `--session FILE` command line option when running the program. The `talk-llama` model state will be saved to the specified file after each interaction. If the file does not exist, it will be created. If the file exists, the model state will be loaded from it, allowing you to resume a previous session. +To enable session support, use the `--session FILE` command line option when running the program. The `whisper-talk-llama` model state will be saved to the specified file after each interaction. If the file does not exist, it will be created. If the file exists, the model state will be loaded from it, allowing you to resume a previous session. This feature is especially helpful for maintaining context in long conversations or when interacting with the AI assistant across multiple sessions. It ensures that the assistant remembers the previous interactions and can provide more relevant and contextual responses. 
Example usage: ```bash -./talk-llama --session ./my-session-file -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8 +./build/bin/whisper-talk-llama --session ./my-session-file -mw ./models/ggml-small.en.bin -ml ../llama.cpp/models/llama-13b/ggml-model-q4_0.gguf -p "Georgi" -t 8 ``` ## TTS diff --git a/examples/talk.wasm/CMakeLists.txt b/examples/talk.wasm/CMakeLists.txt deleted file mode 100644 index 8f00eb48..00000000 --- a/examples/talk.wasm/CMakeLists.txt +++ /dev/null @@ -1,51 +0,0 @@ -# -# libtalk -# - -set(TARGET libtalk) - -add_executable(${TARGET} - emscripten.cpp - gpt-2.cpp - ) - -include(DefaultTargetOptions) - -target_link_libraries(${TARGET} PRIVATE - whisper - common - ) - -unset(EXTRA_FLAGS) - -if (WHISPER_WASM_SINGLE_FILE) - set(EXTRA_FLAGS "-s SINGLE_FILE=1") - message(STATUS "Embedding WASM inside talk.js") - - add_custom_command( - TARGET ${TARGET} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_BINARY_DIR}/bin/libtalk.js - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/talk.wasm/talk.js - ) -endif() - -set_target_properties(${TARGET} PROPERTIES LINK_FLAGS " \ - --bind \ - -s USE_PTHREADS=1 \ - -s PTHREAD_POOL_SIZE=8 \ - -s INITIAL_MEMORY=1800MB \ - -s TOTAL_MEMORY=1800MB \ - -s FORCE_FILESYSTEM=1 \ - -s EXPORTED_RUNTIME_METHODS=\"['print', 'printErr', 'ccall', 'cwrap']\" \ - ${EXTRA_FLAGS} \ - ") - -# -# talk.wasm -# - -set(TARGET talk.wasm) - -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/index-tmpl.html ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/index.html @ONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/../helpers.js ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${TARGET}/helpers.js @ONLY) diff --git a/examples/talk.wasm/README.md b/examples/talk.wasm/README.md deleted file mode 100644 index e656fee7..00000000 --- a/examples/talk.wasm/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# talk.wasm - -Talk with an Artificial Intelligence in your browser: - 
-[https://user-images.githubusercontent.com/1991296/203411580-fedb4839-05e4-4474-8364-aaf1e9a9b615.mp4](https://user-images.githubusercontent.com/1991296/203845553-f7b44e13-9a15-4fc8-b518-ae8f4c6770fe.mp4) - -Online demo: https://whisper.ggerganov.com/talk/ - -Terminal version: [examples/talk](/examples/talk) - -## How it works? - -This demo leverages 2 modern neural network models to create a high-quality voice chat directly in your browser: - -- [OpenAI's Whisper](https://github.com/openai/whisper) speech recognition model is used to process your voice and understand what you are saying -- Upon receiving some voice input, the AI generates a text response using [OpenAI's GPT-2](https://github.com/openai/gpt-2) language model -- The AI then vocalizes the response using the browser's [Web Speech API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API) - -The web page does the processing locally on your machine. The processing of these heavy neural network models in the -browser is possible by implementing them efficiently in C/C++ and using the browser's WebAssembly SIMD capabilities for -extra performance: - -- The Whisper C++ implementation is here: [whisper.h](/whisper.h) / [whisper.cpp](/whisper.cpp) -- The GPT-2 C++ implementation is here: [gpt-2.h](gpt-2.h) / [gpt-2.cpp](gpt-2.cpp) -- Both models use a custom tensor library implemented in C: [ggml.h](/ggml.h) / [ggml.c](/ggml.c) -- The HTML/JS layer is here: [index-tmpl.html](index-tmpl.html) -- The Emscripten bridge between C/C++ and JS is here: [emscripten.cpp](emscripten.cpp) - -In order to run the models, the web page first needs to download the model data which is about ~350 MB. The model data -is then cached in your browser's cache and can be reused in future visits without downloading it again. 
- -## Requirements - -In order to run this demo efficiently, you need to have the following: - -- Latest Chrome or Firefox browser (Safari is not supported) -- Run this on a desktop or laptop with modern CPU (a mobile phone will likely not be good enough) -- Speak phrases that are no longer than 10 seconds - this is the audio context of the AI -- The web-page uses about 1.8GB of RAM - -Notice that this demo is using the smallest GPT-2 model, so the generated text responses are not always very good. -Also, the prompting strategy can likely be improved to achieve better results. - -The demo is quite computationally heavy, so you need a fast CPU. It's not usual to run these transformer models in a -browser. Typically, they run on powerful GPUs. - -Currently, mobile browsers do not support the Fixed-width SIMD WebAssembly capability, so you cannot run this demo -on a phone or a tablet. Hopefully, in the near future this will become supported. - -## Todo - -- Better UI (contributions are welcome) -- Better GPT-2 prompting - -## Build instructions - -```bash -# build using Emscripten (v3.1.2) -git clone https://github.com/ggerganov/whisper.cpp -cd whisper.cpp -mkdir build-em && cd build-em -emcmake cmake .. 
-make -j - -# copy the produced page to your HTTP path -cp bin/talk.wasm/* /path/to/html/ -cp bin/libtalk.worker.js /path/to/html/ -``` - -## Feedback - -If you have any comments or ideas for improvement, please drop a comment in the following discussion: - -https://github.com/ggerganov/whisper.cpp/discussions/167 diff --git a/examples/talk.wasm/emscripten.cpp b/examples/talk.wasm/emscripten.cpp deleted file mode 100644 index 53cb951e..00000000 --- a/examples/talk.wasm/emscripten.cpp +++ /dev/null @@ -1,368 +0,0 @@ -#include "ggml.h" -#include "gpt-2.h" -#include "whisper.h" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -constexpr int N_THREAD = 8; - -struct gpt2_context * g_gpt2; -std::vector g_contexts(4, nullptr); - -std::mutex g_mutex; -std::thread g_worker; -std::atomic g_running(false); - -bool g_force_speak = false; -std::string g_text_to_speak = ""; -std::string g_status = ""; -std::string g_status_forced = ""; - -std::vector g_pcmf32; - -void talk_set_status(const std::string & status) { - std::lock_guard lock(g_mutex); - g_status = status; -} - -void talk_main(size_t index) { - talk_set_status("loading data ..."); - - struct whisper_full_params wparams = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY); - - wparams.n_threads = std::min(N_THREAD, (int) std::thread::hardware_concurrency()); - wparams.offset_ms = 0; - wparams.translate = false; - wparams.no_context = true; - wparams.single_segment = true; - wparams.print_realtime = false; - wparams.print_progress = false; - wparams.print_timestamps = true; - wparams.print_special = false; - - wparams.max_tokens = 32; - wparams.audio_ctx = 768; // partial encoder context for better performance - - wparams.language = "en"; - - g_gpt2 = gpt2_init("gpt-2.bin"); - - printf("talk: using %d threads\n", wparams.n_threads); - - std::vector pcmf32; - - // whisper context - auto & ctx = g_contexts[index]; - - const int64_t step_samples = 
2*WHISPER_SAMPLE_RATE; - const int64_t window_samples = 9*WHISPER_SAMPLE_RATE; - const int64_t step_ms = (step_samples*1000)/WHISPER_SAMPLE_RATE; - - auto t_last = std::chrono::high_resolution_clock::now(); - - talk_set_status("listening ..."); - - while (g_running) { - - const auto t_now = std::chrono::high_resolution_clock::now(); - if (std::chrono::duration_cast(t_now - t_last).count() < step_ms) { - { - std::lock_guard lock(g_mutex); - g_pcmf32.clear(); - } - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - continue; - } - - talk_set_status("listening ..."); - - { - std::unique_lock lock(g_mutex); - - if (g_pcmf32.size() < step_samples) { - lock.unlock(); - - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - - continue; - } - - pcmf32 = std::vector(g_pcmf32.end() - std::min((int64_t) g_pcmf32.size(), window_samples), g_pcmf32.end()); - } - - // VAD: if energy in during last second is above threshold, then skip - { - float energy_all = 0.0f; - float energy_1s = 0.0f; - - for (size_t i = 0; i < pcmf32.size(); i++) { - energy_all += fabsf(pcmf32[i]); - - if (i >= pcmf32.size() - WHISPER_SAMPLE_RATE) { - energy_1s += fabsf(pcmf32[i]); - } - } - - energy_all /= pcmf32.size(); - energy_1s /= WHISPER_SAMPLE_RATE; - - if (energy_1s > 0.1f*energy_all && !g_force_speak) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - continue; - } - } - - talk_set_status("processing audio (whisper)..."); - - t_last = t_now; - - if (!g_force_speak) { - const auto t_start = std::chrono::high_resolution_clock::now(); - - int ret = whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()); - if (ret != 0) { - printf("whisper_full() failed: %d\n", ret); - break; - } - - const auto t_end = std::chrono::high_resolution_clock::now(); - - printf("whisper_full() returned %d in %f seconds\n", ret, std::chrono::duration(t_end - t_start).count()); - } - - { - std::string text_heard; - - if (!g_force_speak) { - const int n_segments = 
whisper_full_n_segments(ctx); - for (int i = n_segments - 1; i < n_segments; ++i) { - const char * text = whisper_full_get_segment_text(ctx, i); - - const int64_t t0 = whisper_full_get_segment_t0(ctx, i); - const int64_t t1 = whisper_full_get_segment_t1(ctx, i); - - printf ("[%s --> %s] %s\n", to_timestamp(t0).c_str(), to_timestamp(t1).c_str(), text); - - text_heard += text; - } - } - - g_force_speak = false; - - // remove text between brackets using regex - { - std::regex re("\\[.*?\\]"); - text_heard = std::regex_replace(text_heard, re, ""); - } - - // remove text between brackets using regex - { - std::regex re("\\(.*?\\)"); - text_heard = std::regex_replace(text_heard, re, ""); - } - - // remove all characters, except for letters, numbers, punctuation and ':', '\'', '-', ' ' - text_heard = std::regex_replace(text_heard, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), ""); - - // take first line - text_heard = text_heard.substr(0, text_heard.find_first_of("\n")); - - // remove leading and trailing whitespace - text_heard = std::regex_replace(text_heard, std::regex("^\\s+"), ""); - text_heard = std::regex_replace(text_heard, std::regex("\\s+$"), ""); - - talk_set_status("'" + text_heard + "' - thinking how to respond (gpt-2) ..."); - - const std::vector tokens = gpt2_tokenize(g_gpt2, text_heard.c_str()); - - printf("whisper: number of tokens: %d, '%s'\n", (int) tokens.size(), text_heard.c_str()); - - std::string text_to_speak; - std::string prompt_base; - - { - std::lock_guard lock(g_mutex); - prompt_base = gpt2_get_prompt(g_gpt2); - } - - if (tokens.size() > 0) { - text_to_speak = gpt2_gen_text(g_gpt2, (prompt_base + text_heard + "\n").c_str(), 32); - text_to_speak = std::regex_replace(text_to_speak, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), ""); - text_to_speak = text_to_speak.substr(0, text_to_speak.find_first_of("\n")); - - std::lock_guard lock(g_mutex); - - // remove first 2 lines of base prompt - { - const size_t pos = prompt_base.find_first_of("\n"); 
- if (pos != std::string::npos) { - prompt_base = prompt_base.substr(pos + 1); - } - } - { - const size_t pos = prompt_base.find_first_of("\n"); - if (pos != std::string::npos) { - prompt_base = prompt_base.substr(pos + 1); - } - } - prompt_base += text_heard + "\n" + text_to_speak + "\n"; - } else { - text_to_speak = gpt2_gen_text(g_gpt2, prompt_base.c_str(), 32); - text_to_speak = std::regex_replace(text_to_speak, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), ""); - text_to_speak = text_to_speak.substr(0, text_to_speak.find_first_of("\n")); - - std::lock_guard lock(g_mutex); - - const size_t pos = prompt_base.find_first_of("\n"); - if (pos != std::string::npos) { - prompt_base = prompt_base.substr(pos + 1); - } - prompt_base += text_to_speak + "\n"; - } - - printf("gpt-2: %s\n", text_to_speak.c_str()); - - //printf("========================\n"); - //printf("gpt-2: prompt_base:\n'%s'\n", prompt_base.c_str()); - //printf("========================\n"); - - { - std::lock_guard lock(g_mutex); - t_last = std::chrono::high_resolution_clock::now(); - g_text_to_speak = text_to_speak; - g_pcmf32.clear(); - gpt2_set_prompt(g_gpt2, prompt_base.c_str()); - } - - talk_set_status("speaking ..."); - } - } - - gpt2_free(g_gpt2); - - if (index < g_contexts.size()) { - whisper_free(g_contexts[index]); - g_contexts[index] = nullptr; - } -} - -EMSCRIPTEN_BINDINGS(talk) { - emscripten::function("init", emscripten::optional_override([](const std::string & path_model) { - for (size_t i = 0; i < g_contexts.size(); ++i) { - if (g_contexts[i] == nullptr) { - g_contexts[i] = whisper_init_from_file_with_params(path_model.c_str(), whisper_context_default_params()); - if (g_contexts[i] != nullptr) { - g_running = true; - if (g_worker.joinable()) { - g_worker.join(); - } - g_worker = std::thread([i]() { - talk_main(i); - }); - - return i + 1; - } else { - return (size_t) 0; - } - } - } - - return (size_t) 0; - })); - - emscripten::function("free", emscripten::optional_override([](size_t 
index) { - if (g_running) { - g_running = false; - } - })); - - emscripten::function("set_audio", emscripten::optional_override([](size_t index, const emscripten::val & audio) { - --index; - - if (index >= g_contexts.size()) { - return -1; - } - - if (g_contexts[index] == nullptr) { - return -2; - } - - { - std::lock_guard lock(g_mutex); - const int n = audio["length"].as(); - - emscripten::val heap = emscripten::val::module_property("HEAPU8"); - emscripten::val memory = heap["buffer"]; - - g_pcmf32.resize(n); - - emscripten::val memoryView = audio["constructor"].new_(memory, reinterpret_cast(g_pcmf32.data()), n); - memoryView.call("set", audio); - } - - return 0; - })); - - emscripten::function("force_speak", emscripten::optional_override([](size_t index) { - { - std::lock_guard lock(g_mutex); - g_force_speak = true; - } - })); - - emscripten::function("get_text_context", emscripten::optional_override([]() { - std::string text_context; - - { - std::lock_guard lock(g_mutex); - text_context = gpt2_get_prompt(g_gpt2); - } - - return text_context; - })); - - emscripten::function("get_text_to_speak", emscripten::optional_override([]() { - std::string text_to_speak; - - { - std::lock_guard lock(g_mutex); - text_to_speak = std::move(g_text_to_speak); - } - - return text_to_speak; - })); - - emscripten::function("get_status", emscripten::optional_override([]() { - std::string status; - - { - std::lock_guard lock(g_mutex); - status = g_status_forced.empty() ? 
g_status : g_status_forced; - } - - return status; - })); - - emscripten::function("set_status", emscripten::optional_override([](const std::string & status) { - { - std::lock_guard lock(g_mutex); - g_status_forced = status; - } - })); - - emscripten::function("set_prompt", emscripten::optional_override([](const std::string & prompt) { - { - std::lock_guard lock(g_mutex); - gpt2_set_prompt(g_gpt2, prompt.c_str()); - } - })); -} diff --git a/examples/talk.wasm/gpt-2.cpp b/examples/talk.wasm/gpt-2.cpp deleted file mode 100644 index 22ec3354..00000000 --- a/examples/talk.wasm/gpt-2.cpp +++ /dev/null @@ -1,808 +0,0 @@ -#include "ggml.h" -#include "common-ggml.h" - -#include "gpt-2.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/////////////////////// GPT-2 BEGIN ///////////////////////// - -// default hparams (GPT-2 117M) -struct gpt2_hparams { - int32_t n_vocab = 50257; - int32_t n_ctx = 1024; - int32_t n_embd = 768; - int32_t n_head = 12; - int32_t n_layer = 12; - int32_t ftype = 1; -}; - -struct gpt2_layer { - // normalization - struct ggml_tensor * ln_1_g; - struct ggml_tensor * ln_1_b; - - struct ggml_tensor * ln_2_g; - struct ggml_tensor * ln_2_b; - - // attention - struct ggml_tensor * c_attn_attn_w; - struct ggml_tensor * c_attn_attn_b; - - struct ggml_tensor * c_attn_proj_w; - struct ggml_tensor * c_attn_proj_b; - - // mlp - struct ggml_tensor * c_mlp_fc_w; - struct ggml_tensor * c_mlp_fc_b; - - struct ggml_tensor * c_mlp_proj_w; - struct ggml_tensor * c_mlp_proj_b; -}; - -struct gpt2_model { - gpt2_hparams hparams; - - // normalization - struct ggml_tensor * ln_f_g; - struct ggml_tensor * ln_f_b; - - struct ggml_tensor * wte; // position embedding - struct ggml_tensor * wpe; // token embedding - struct ggml_tensor * lm_head; // language model head - - std::vector layers; - - // key + value memory - struct ggml_tensor * memory_k; - struct ggml_tensor * memory_v; - - // - struct ggml_context * ctx; - 
std::map tensors; -}; - -// load the model's weights from a file -bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab) { - printf("%s: loading model from '%s'\n", __func__, fname.c_str()); - - auto fin = std::ifstream(fname, std::ios::binary); - if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); - return false; - } - - // verify magic - { - uint32_t magic; - fin.read((char *) &magic, sizeof(magic)); - if (magic != 0x67676d6c) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); - return false; - } - } - - // load hparams - { - auto & hparams = model.hparams; - - fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); - fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); - fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); - fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); - - printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: ftype = %d\n", __func__, hparams.ftype); - } - - // load vocab - { - int32_t n_vocab = 0; - fin.read((char *) &n_vocab, sizeof(n_vocab)); - - if (n_vocab != model.hparams.n_vocab) { - fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", - __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); - return false; - } - - std::string word; - for (int i = 0; i < n_vocab; i++) { - uint32_t len; - fin.read((char *) &len, sizeof(len)); - - word.resize(len); - fin.read((char *) word.data(), len); - - vocab.token_to_id[word] = i; - vocab.id_to_token[i] = word; - } - } - - // for the big tensors, we have the option to store the 
data in 16-bit floats or quantized - // in order to save memory and also to speed up the computation - ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); - if (wtype == GGML_TYPE_COUNT) { - fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", - __func__, fname.c_str(), model.hparams.ftype); - return false; - } - - auto & ctx = model.ctx; - - size_t ctx_size = 0; - - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_g - ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_b - - ctx_size += n_vocab*ggml_row_size(wtype, n_embd); // wte - ctx_size += n_ctx*ggml_row_size(GGML_TYPE_F32, n_embd); // wpe - ctx_size += n_vocab*ggml_row_size(wtype, n_embd); // lm_head - - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_g - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_b - - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_g - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_b - - ctx_size += n_layer*(ggml_row_size(wtype, 3*n_embd*n_embd)); // c_attn_attn_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 3*n_embd)); // c_attn_attn_b - - ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_proj_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_attn_proj_b - - ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_fc_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_fc_b - - ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_proj_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_mlp_proj_b - - ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_k - ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); 
// memory_v - - ctx_size += (6 + 12*n_layer)*256; // object overhead - - printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); - } - - // create the ggml context - { - struct ggml_init_params params = { - /*.mem_size =*/ ctx_size, - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ false, - }; - - model.ctx = ggml_init(params); - if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); - return false; - } - } - - // prepare memory for the weights - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - model.layers.resize(n_layer); - - model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); - model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - - // map by name - model.tensors["model/ln_f/g"] = model.ln_f_g; - model.tensors["model/ln_f/b"] = model.ln_f_b; - - model.tensors["model/wte"] = model.wte; - model.tensors["model/wpe"] = model.wpe; - model.tensors["model/lm_head"] = model.lm_head; - - for (int i = 0; i < n_layer; ++i) { - auto & layer = model.layers[i]; - - layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); - layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); - - layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 
4*n_embd); - layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); - - layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); - layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - // map by name - model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; - model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; - - model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; - model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; - - model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; - model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; - - model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; - model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; - - model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; - model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b; - - model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; - model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; - } - } - - // key + value memory - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - - const int n_mem = n_layer*n_ctx; - const int n_elements = n_embd*n_mem; - - model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - - const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); - - printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); - } - - // load weights - { - size_t total_size = 0; - - bool has_lm_head = false; - - while (true) { - int32_t n_dims; - int32_t length; - 
int32_t ttype; - - fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - fin.read(reinterpret_cast(&length), sizeof(length)); - fin.read(reinterpret_cast(&ttype), sizeof(ttype)); - - if (fin.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - fin.read(&name[0], length); - - if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); - return false; - } - - auto tensor = model.tensors[name.data()]; - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - - if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]); - return false; - } - - // for debugging - if (0) { - printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); - } - - const size_t bpe = ggml_type_size(ggml_type(ttype)); - - if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); - return false; - } - - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - - // GPT-2 models share the WTE tensor as the LM head - if (name == "model/wte" && has_lm_head == false) { - memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor)); - } - - if (name == "model/lm_head") { - has_lm_head = true; - } - - total_size += ggml_nbytes(tensor); - } - - printf("%s: model size = %8.2f 
MB\n", __func__, total_size/1024.0/1024.0); - } - - fin.close(); - - return true; -} - -// evaluate the transformer -// -// - model: the model -// - n_threads: number of threads to use -// - n_past: the context size so far -// - embd_inp: the embeddings of the tokens in the context -// - embd_w: the predicted logits for the next token -// -bool gpt2_eval( - const gpt2_model & model, - const int n_threads, - const int n_past, - const std::vector & embd_inp, - std::vector & embd_w, - size_t & mem_per_token) { - const int N = embd_inp.size(); - - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_head = hparams.n_head; - const int n_vocab = hparams.n_vocab; - - static size_t buf_size = 512u*1024*1024; - static void * buf = malloc(buf_size); - - if (mem_per_token > 0 && mem_per_token*N > buf_size) { - const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead - //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new); - - // reallocate - buf_size = buf_size_new; - buf = realloc(buf, buf_size); - if (buf == nullptr) { - fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size); - return false; - } - } - - struct ggml_init_params params = { - /*.mem_size =*/ buf_size, - /*.mem_buffer =*/ buf, - /*.no_alloc =*/ false, - }; - - struct ggml_context * ctx0 = ggml_init(params); - struct ggml_cgraph gf = {}; - - struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); - - struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - for (int i = 0; i < N; ++i) { - ((int32_t *) position->data)[i] = n_past + i; - } - - // wte + wpe - struct ggml_tensor * inpL = - ggml_add(ctx0, - ggml_get_rows(ctx0, model.wte, embd), - ggml_get_rows(ctx0, model.wpe, position)); - - for (int il = 
0; il < n_layer; ++il) { - struct ggml_tensor * cur; - - // norm - { - // [ 768, N] - cur = ggml_norm(ctx0, inpL, 1e-5f); - - // cur = ln_1_g*cur + ln_1_b - // [ 768, N] - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_1_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); - } - - // attn - // [2304, 768] - model.layers[il].c_attn_attn_w - // [2304, 1] - model.layers[il].c_attn_attn_b - // [ 768, N] - cur (in) - // [2304, N] - cur (out) - // - // cur = attn_w*cur + attn_b - // [2304, N] - { - cur = ggml_mul_mat(ctx0, - model.layers[il].c_attn_attn_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur), - cur); - } - - // self-attention - { - struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); - struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); - struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd); - - // store key and value to memory - if (N >= 1) { - struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); - struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); - - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); - } - - // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) - // [64, N, 12] - struct ggml_tensor * Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, - Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)), - 0, 2, 1, 3); - - // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) - // [64, n_past + N, 12] - struct ggml_tensor * K = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, 
il*n_ctx*ggml_element_size(model.memory_k)*n_embd), - n_embd/n_head, n_head, n_past + N), - 0, 2, 1, 3); - - // GG: flash attention - //struct ggml_tensor * V = - // ggml_cpy(ctx0, - // ggml_permute(ctx0, - // ggml_reshape_3d(ctx0, - // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - // n_embd/n_head, n_head, n_past + N), - // 1, 2, 0, 3), - // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); - - //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); - - // K * Q - // [n_past + N, N, 12] - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - - // KQ_scaled = KQ / sqrt(n_embd/n_head) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - 1.0f/sqrt(float(n_embd)/n_head)); - - // KQ_masked = mask_past(KQ_scaled) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); - - // KQ = soft_max(KQ_masked) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - - // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() - // [n_past + N, 64, 12] - struct ggml_tensor * V_trans = - ggml_cpy(ctx0, - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - n_embd/n_head, n_head, n_past + N), - 1, 2, 0, 3), - ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head)); - - // KQV = transpose(V) * KQ_soft_max - // [64, N, 12] - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - - // KQV_merged = KQV.permute(0, 2, 1, 3) - // [64, 12, N] - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - - // cur = KQV_merged.contiguous().view(n_embd, N) - // [768, N] - cur = ggml_cpy(ctx0, - KQV_merged, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); - } - - // projection - // [ 
768, 768] - model.layers[il].c_attn_proj_w - // [ 768, 1] - model.layers[il].c_attn_proj_b - // [ 768, N] - cur (in) - // [ 768, N] - cur (out) - // - // cur = proj_w*cur + proj_b - // [768, N] - { - cur = ggml_mul_mat(ctx0, - model.layers[il].c_attn_proj_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), - cur); - } - - // add the input - cur = ggml_add(ctx0, cur, inpL); - - struct ggml_tensor * inpFF = cur; - - // feed-forward network - { - // norm - { - cur = ggml_norm(ctx0, inpFF, 1e-5f); - - // cur = ln_2_g*cur + ln_2_b - // [ 768, N] - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_2_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_2_b, cur)); - } - - // fully connected - // [3072, 768] - model.layers[il].c_mlp_fc_w - // [3072, 1] - model.layers[il].c_mlp_fc_b - // [ 768, N] - cur (in) - // [3072, N] - cur (out) - // - // cur = fc_w*cur + fc_b - // [3072, N] - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_fc_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), - cur); - - // GELU activation - // [3072, N] - cur = ggml_gelu(ctx0, cur); - - // projection - // [ 768, 3072] - model.layers[il].c_mlp_proj_w - // [ 768, 1] - model.layers[il].c_mlp_proj_b - // [3072, N] - cur (in) - // [ 768, N] - cur (out) - // - // cur = proj_w*cur + proj_b - // [768, N] - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_proj_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), - cur); - } - - // input for next layer - inpL = ggml_add(ctx0, cur, inpFF); - } - - // norm - { - // [ 768, N] - inpL = ggml_norm(ctx0, inpL, 1e-5f); - - // inpL = ln_f_g*inpL + ln_f_b - // [ 768, N] - inpL = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.ln_f_g, inpL), - inpL), - ggml_repeat(ctx0, model.ln_f_b, inpL)); - } - - // inpL = WTE * inpL - // [ 768, 50257] - model.lm_head - // [ 768, N] - inpL - inpL = ggml_mul_mat(ctx0, 
model.lm_head, inpL); - - // logits -> probs - //inpL = ggml_soft_max(ctx0, inpL); - - // run the computation - ggml_build_forward_expand (&gf, inpL); - ggml_graph_compute_with_ctx(ctx0, &gf, n_threads); - - //if (n_past%100 == 0) { - // ggml_graph_print (&gf); - // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); - //} - - //embd_w.resize(n_vocab*N); - //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); - - // return result just for the last token - embd_w.resize(n_vocab); - memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); - - if (mem_per_token == 0) { - mem_per_token = ggml_used_mem(ctx0)/N; - } - //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); - - ggml_free(ctx0); - - return true; -} - -/////////////////////////////// GPT-2 END //////////////////////////////// - -constexpr int N_THREAD = 8; - -struct gpt2_context { - std::string prompt_base = R"(Hello, how are you? -I'm fine, thanks. How are you? -Thanks, I'm fine too. What are you doing? -I'm just sitting here. -It's a lovely day, isn't it? -Yes, it is. I love the weather this time of year. -I wish it would rain a little bit. -Me too. 
-)"; - - std::mt19937 rng; - - gpt_vocab vocab; - gpt2_model model; - - int32_t n_threads = std::min(N_THREAD, (int) std::thread::hardware_concurrency()); - - // sampling parameters - int32_t top_k = 5; - float top_p = 0.9f; - float temp = 1.0f; -}; - -struct gpt2_context * gpt2_init(const char * path_model) { - gpt2_context * ctx = new gpt2_context; - - ctx->rng = std::mt19937(time(nullptr)); - - // load the model - { - const int64_t t_start_us = ggml_time_us(); - - if (!gpt2_model_load(path_model, ctx->model, ctx->vocab)) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, path_model); - delete ctx; - return nullptr; - } - - const int64_t t_load_us = ggml_time_us() - t_start_us; - - printf("gpt-2: model loaded in %d ms\n", (int) (t_load_us/1000)); - } - - return ctx; -} - -void gpt2_free(struct gpt2_context * ctx) { - delete ctx; -} - -const char * gpt2_get_prompt(struct gpt2_context * ctx) { - return ctx->prompt_base.c_str(); -} - -void gpt2_set_prompt(struct gpt2_context * ctx, const char * prompt) { - ctx->prompt_base = prompt; -} - -std::vector gpt2_tokenize(const gpt2_context * ctx, const char * text) { - return ::gpt_tokenize(ctx->vocab, text); -} - -std::string gpt2_gen_text(gpt2_context * ctx, const char * text, int max_tokens) { - int n_past = 0; - - std::vector embd_w; - - // tokenize the prompt - std::vector embd_inp = ::gpt2_tokenize(ctx, text); - - int n_predict = std::min(max_tokens, ctx->model.hparams.n_ctx - (int) embd_inp.size()); - - std::vector embd = embd_inp; - - size_t mem_per_token = 3000000; - - std::string result; - - for (int i = embd.size(); i < (int) embd_inp.size() + n_predict; i++) { - // predict - if (!embd.empty()) { - if (!gpt2_eval(ctx->model, ctx->n_threads, n_past, embd, embd_w, mem_per_token)) { - printf("gpt-2: failed to generate text\n"); - return ""; - } - } - - n_past += embd.size(); - embd.clear(); - - { - // sample next token - const int top_k = ctx->top_k; - const float top_p = ctx->top_p; - const 
float temp = ctx->temp; - - const int n_vocab = ctx->model.hparams.n_vocab; - - const gpt_vocab::id id = gpt_sample_top_k_top_p(ctx->vocab, embd_w.data() + (embd_w.size() - n_vocab), top_k, top_p, temp, ctx->rng); - - // add it to the context - embd.push_back(id); - } - - result += ctx->vocab.id_to_token[embd[0]]; - - // end of text token - if (embd.back() == 50256) { - break; - } - } - - return result; -} diff --git a/examples/talk.wasm/gpt-2.h b/examples/talk.wasm/gpt-2.h deleted file mode 100644 index 756fbfa9..00000000 --- a/examples/talk.wasm/gpt-2.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -// TODO: Change to C-style API and move to ./examples for easy reuse. - -#include "common.h" - -#include -#include -#include - -struct gpt2_context; - -struct gpt2_context * gpt2_init(const char * path_model); -void gpt2_free(struct gpt2_context * ctx); - -const char * gpt2_get_prompt(struct gpt2_context * ctx); -void gpt2_set_prompt(struct gpt2_context * ctx, const char * prompt); - -std::vector gpt2_tokenize(const gpt2_context * ctx, const char * text); - -std::string gpt2_gen_text(gpt2_context * ctx, const char * text, int max_tokens); diff --git a/examples/talk.wasm/index-tmpl.html b/examples/talk.wasm/index-tmpl.html deleted file mode 100644 index 51243929..00000000 --- a/examples/talk.wasm/index-tmpl.html +++ /dev/null @@ -1,856 +0,0 @@ - - - - Talk - GPT-2 meets Whisper in WebAssembly - - - - -
- Talk - GPT-2 meets Whisper in WebAssembly - -

- - Talk with an Artificial Intelligence in your browser. This demo uses: - - - - All of this runs locally in your browser using WebAssembly.
- You can find more about this project on GitHub. - -

- - More examples: - main | - bench | - stream | - command | - talk | - -

- -
- - Select the models you would like to use and click the "Start" button to begin the conversation - -

- -
- Whisper model: - - -

- Quantized models:

- - - - - -
- -
- -
- GPT-2 model: - - - - - -
- -
- -
- - - - - - - -
- -
- -
- Status: not started - -
[The text context will be displayed here]
-
- -
- - Debug output: - - -
- - Troubleshooting - -

- - The page does some heavy computations, so make sure: - -
    -
  • To use a modern web browser (e.g. Chrome, Firefox)
  • -
  • To use a fast desktop or laptop computer (i.e. not a mobile phone)
  • -
  • Your browser supports WASM Fixed-width SIMD
  • -
- - Note that these neural network models were not meant to be used in a browser, so the performance and
- quality of the results may not be optimal. If you have any questions or suggestions, checkout the following - discussion. - -

- - Here is a short video of the demo in action: https://youtu.be/LeWKl8t1-Hc - -

- -
- - | - Build time: @GIT_DATE@ | - Commit hash: @GIT_SHA1@ | - Commit subject: @GIT_COMMIT_SUBJECT@ | - Source Code | - -
-
- - - - - - diff --git a/examples/talk/.gitignore b/examples/talk/.gitignore deleted file mode 100644 index 9c08e1f4..00000000 --- a/examples/talk/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -audio.mp3 -to_speak.txt diff --git a/examples/talk/CMakeLists.txt b/examples/talk/CMakeLists.txt deleted file mode 100644 index e099e2cd..00000000 --- a/examples/talk/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -if (WHISPER_SDL2) - # talk - set(TARGET talk) - add_executable(${TARGET} talk.cpp gpt-2.cpp) - target_link_libraries(${TARGET} PRIVATE common common-sdl whisper ${CMAKE_THREAD_LIBS_INIT}) - - include(DefaultTargetOptions) -endif () diff --git a/examples/talk/README.md b/examples/talk/README.md deleted file mode 100644 index f0121f1c..00000000 --- a/examples/talk/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# talk - -Talk with an Artificial Intelligence in your terminal - -[Demo Talk](https://user-images.githubusercontent.com/1991296/206805012-48e71cc2-588d-4745-8798-c1c70ea3b40d.mp4) - -Web version: [examples/talk.wasm](/examples/talk.wasm) - -## Building - -The `talk` tool depends on SDL2 library to capture audio from the microphone. You can build it like this: - -```bash -# Install SDL2 -# On Debian based linux distributions: -sudo apt-get install libsdl2-dev - -# On Fedora Linux: -sudo dnf install SDL2 SDL2-devel - -# Install SDL2 on Mac OS -brew install sdl2 - -# Build the "talk" executable -make talk - -# Run it -./talk -p Santa -``` - -## GPT-2 - -To run this, you will need a ggml GPT-2 model: [instructions](https://github.com/ggerganov/ggml/tree/master/examples/gpt-2#downloading-and-converting-the-original-models) - -Alternatively, you can simply download the smallest ggml GPT-2 117M model (240 MB) like this: - -``` -wget --quiet --show-progress -O models/ggml-gpt-2-117M.bin https://huggingface.co/ggerganov/ggml/resolve/main/ggml-model-gpt-2-117M.bin -``` - -## TTS - -For best experience, this example needs a TTS tool to convert the generated text responses to voice. 
-You can use any TTS engine that you would like - simply edit the [speak](speak) script to your needs. -By default, it is configured to use MacOS's `say` or `espeak` or Windows SpeechSynthesizer, but you can use whatever you wish. diff --git a/examples/talk/eleven-labs.py b/examples/talk/eleven-labs.py deleted file mode 100644 index 7ed1d5dc..00000000 --- a/examples/talk/eleven-labs.py +++ /dev/null @@ -1,80 +0,0 @@ -import sys -import argparse -import textwrap - -parser = argparse.ArgumentParser(add_help=False, - formatter_class=argparse.RawTextHelpFormatter) -parser.add_argument("-q", "--quick", action="store_true", - help="skip checking the required library") - -modes = parser.add_argument_group("action") -modes.add_argument("inputfile", metavar="TEXTFILE", - nargs='?', type=argparse.FileType(), default=sys.stdin, - help="read the text file (default: stdin)") -modes.add_argument("-l", "--list", action="store_true", - help="show the list of voices and exit") -modes.add_argument("-h", "--help", action="help", - help="show this help and exit") - -selopts = parser.add_argument_group("voice selection") -selmodes = selopts.add_mutually_exclusive_group() -selmodes.add_argument("-n", "--name", - default="Arnold", - help="get a voice object by name (default: Arnold)") -selmodes.add_argument("-v", "--voice", type=int, metavar="NUMBER", - help="get a voice object by number (see --list)") -selopts.add_argument("-f", "--filter", action="append", metavar="KEY=VAL", - default=["use case=narration"], - help=textwrap.dedent('''\ - filter voices by labels (default: "use case=narration") - this option can be used multiple times - filtering will be disabled if the first -f has no "=" (e.g. 
-f "any") - ''')) - -outmodes = parser.add_argument_group("output") -outgroup = outmodes.add_mutually_exclusive_group() -outgroup.add_argument("-s", "--save", metavar="FILE", - default="audio.mp3", - help="save the TTS to a file (default: audio.mp3)") -outgroup.add_argument("-p", "--play", action="store_true", - help="play the TTS with ffplay") - -args = parser.parse_args() - -if not args.quick: - import importlib.util - if importlib.util.find_spec("elevenlabs") is None: - print("elevenlabs library is not installed, you can install it to your enviroment using 'pip install elevenlabs'") - sys.exit() - -from elevenlabs import voices, generate, play, save - -if args.filter and "=" in args.filter[0]: - voicelist = voices() - for f in args.filter: - label, value = f.split("=") - voicelist = filter(lambda x: x.labels.get(label) == value, voicelist) - voicelist = list(voicelist) -else: - voicelist = list(voices()) - -if args.list: - for i, v in enumerate(voicelist): - print(str(i) + ": " + v.name + " " + str(v.labels)) - sys.exit() - -if args.voice: - voice = voicelist[args.voice % len(voicelist)] -else: - voice = args.name - # if -n should consult -f, use the following - #voice = next(x for x in voicelist if x.name == args.name) - -audio = generate( - text=str(args.inputfile.read()), - voice=voice -) -if args.play: - play(audio) -else: - save(audio, args.save) diff --git a/examples/talk/gpt-2.cpp b/examples/talk/gpt-2.cpp deleted file mode 100644 index 43ca8fa0..00000000 --- a/examples/talk/gpt-2.cpp +++ /dev/null @@ -1,809 +0,0 @@ -#include "ggml.h" -#include "common-ggml.h" - -#include "gpt-2.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/////////////////////// GPT-2 BEGIN ///////////////////////// - -// default hparams (GPT-2 117M) -struct gpt2_hparams { - int32_t n_vocab = 50257; - int32_t n_ctx = 1024; - int32_t n_embd = 768; - int32_t n_head = 12; - int32_t n_layer = 12; - int32_t ftype = 1; -}; - 
-struct gpt2_layer { - // normalization - struct ggml_tensor * ln_1_g; - struct ggml_tensor * ln_1_b; - - struct ggml_tensor * ln_2_g; - struct ggml_tensor * ln_2_b; - - // attention - struct ggml_tensor * c_attn_attn_w; - struct ggml_tensor * c_attn_attn_b; - - struct ggml_tensor * c_attn_proj_w; - struct ggml_tensor * c_attn_proj_b; - - // mlp - struct ggml_tensor * c_mlp_fc_w; - struct ggml_tensor * c_mlp_fc_b; - - struct ggml_tensor * c_mlp_proj_w; - struct ggml_tensor * c_mlp_proj_b; -}; - -struct gpt2_model { - gpt2_hparams hparams; - - // normalization - struct ggml_tensor * ln_f_g; - struct ggml_tensor * ln_f_b; - - struct ggml_tensor * wte; // position embedding - struct ggml_tensor * wpe; // token embedding - struct ggml_tensor * lm_head; // language model head - - std::vector layers; - - // key + value memory - struct ggml_tensor * memory_k; - struct ggml_tensor * memory_v; - - // - struct ggml_context * ctx; - std::map tensors; -}; - -// load the model's weights from a file -static bool gpt2_model_load(const std::string & fname, gpt2_model & model, gpt_vocab & vocab) { - printf("%s: loading model from '%s'\n", __func__, fname.c_str()); - - auto fin = std::ifstream(fname, std::ios::binary); - if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str()); - return false; - } - - // verify magic - { - uint32_t magic; - fin.read((char *) &magic, sizeof(magic)); - if (magic != 0x67676d6c) { - fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str()); - return false; - } - } - - // load hparams - { - auto & hparams = model.hparams; - - fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab)); - fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx)); - fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd)); - fin.read((char *) &hparams.n_head, sizeof(hparams.n_head)); - fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer)); - fin.read((char *) &hparams.ftype, sizeof(hparams.ftype)); - - 
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); - printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); - printf("%s: n_embd = %d\n", __func__, hparams.n_embd); - printf("%s: n_head = %d\n", __func__, hparams.n_head); - printf("%s: n_layer = %d\n", __func__, hparams.n_layer); - printf("%s: ftype = %d\n", __func__, hparams.ftype); - } - - // load vocab - { - int32_t n_vocab = 0; - fin.read((char *) &n_vocab, sizeof(n_vocab)); - - if (n_vocab != model.hparams.n_vocab) { - fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n", - __func__, fname.c_str(), n_vocab, model.hparams.n_vocab); - return false; - } - - char word[129]; - - for (int i = 0; i < n_vocab; i++) { - uint32_t len; - fin.read((char *) &len, sizeof(len)); - word[len] = '\0'; - fin.read((char *) word, len); - - vocab.token_to_id[word] = i; - vocab.id_to_token[i] = word; - } - } - - // for the big tensors, we have the option to store the data in 16-bit floats or quantized - // in order to save memory and also to speed up the computation - ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype)); - if (wtype == GGML_TYPE_COUNT) { - fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", - __func__, fname.c_str(), model.hparams.ftype); - return false; - } - - auto & ctx = model.ctx; - - size_t ctx_size = 0; - - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_g - ctx_size += ggml_row_size(GGML_TYPE_F32, n_embd); // ln_f_b - - ctx_size += n_vocab*ggml_row_size(wtype, n_embd); // wte - ctx_size += n_ctx*ggml_row_size(GGML_TYPE_F32, n_embd); // wpe - ctx_size += n_vocab*ggml_row_size(wtype, n_embd); // lm_head - - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_1_g - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // 
ln_1_b - - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_g - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // ln_2_b - - ctx_size += n_layer*(ggml_row_size(wtype, 3*n_embd*n_embd)); // c_attn_attn_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 3*n_embd)); // c_attn_attn_b - - ctx_size += n_layer*(ggml_row_size(wtype, n_embd*n_embd)); // c_attn_proj_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_attn_proj_b - - ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_fc_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, 4*n_embd)); // c_mlp_fc_b - - ctx_size += n_layer*(ggml_row_size(wtype, 4*n_embd*n_embd)); // c_mlp_proj_w - ctx_size += n_layer*(ggml_row_size(GGML_TYPE_F32, n_embd)); // c_mlp_proj_b - - ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_k - ctx_size += n_ctx*n_layer*ggml_row_size(GGML_TYPE_F32, n_embd); // memory_v - - ctx_size += (6 + 12*n_layer)*256; // object overhead - - printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); - } - - // create the ggml context - { - struct ggml_init_params params = { - /*.mem_size =*/ ctx_size, - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ false, - }; - - model.ctx = ggml_init(params); - if (!model.ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); - return false; - } - } - - // prepare memory for the weights - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_vocab = hparams.n_vocab; - - model.layers.resize(n_layer); - - model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx); - model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); - - // map by 
name - model.tensors["model/ln_f/g"] = model.ln_f_g; - model.tensors["model/ln_f/b"] = model.ln_f_b; - - model.tensors["model/wte"] = model.wte; - model.tensors["model/wpe"] = model.wpe; - model.tensors["model/lm_head"] = model.lm_head; - - for (int i = 0; i < n_layer; ++i) { - auto & layer = model.layers[i]; - - layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd); - layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd); - - layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); - layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); - layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd); - - layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd); - layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); - - // map by name - model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g; - model.tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b; - - model.tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g; - model.tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b; - - model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w; - model.tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b; - - model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w; - model.tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b; - - model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w; - model.tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = 
layer.c_mlp_fc_b; - - model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w; - model.tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b; - } - } - - // key + value memory - { - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - - const int n_mem = n_layer*n_ctx; - const int n_elements = n_embd*n_mem; - - model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements); - - const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); - - printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem); - } - - // load weights - { - size_t total_size = 0; - - bool has_lm_head = false; - - while (true) { - int32_t n_dims; - int32_t length; - int32_t ttype; - - fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); - fin.read(reinterpret_cast(&length), sizeof(length)); - fin.read(reinterpret_cast(&ttype), sizeof(ttype)); - - if (fin.eof()) { - break; - } - - int32_t nelements = 1; - int32_t ne[2] = { 1, 1 }; - for (int i = 0; i < n_dims; ++i) { - fin.read(reinterpret_cast(&ne[i]), sizeof(ne[i])); - nelements *= ne[i]; - } - - std::string name(length, 0); - fin.read(&name[0], length); - - if (model.tensors.find(name.data()) == model.tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); - return false; - } - - auto tensor = model.tensors[name.data()]; - if (ggml_nelements(tensor) != nelements) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); - return false; - } - - if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) { - fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n", - __func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], 
ne[1]); - return false; - } - - // for debugging - if (0) { - printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor)); - } - - const size_t bpe = ggml_type_size(ggml_type(ttype)); - - if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) { - fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n", - __func__, name.data(), ggml_nbytes(tensor), nelements*bpe); - return false; - } - - fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); - - // GPT-2 models share the WTE tensor as the LM head - if (name == "model/wte" && has_lm_head == false) { - memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor)); - } - - if (name == "model/lm_head") { - has_lm_head = true; - } - - total_size += ggml_nbytes(tensor); - } - - printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0); - } - - fin.close(); - - return true; -} - -// evaluate the transformer -// -// - model: the model -// - n_threads: number of threads to use -// - n_past: the context size so far -// - embd_inp: the embeddings of the tokens in the context -// - embd_w: the predicted logits for the next token -// -// TODO: sync latest version from ggml repo -static bool gpt2_eval( - const gpt2_model & model, - const int n_threads, - const int n_past, - const std::vector & embd_inp, - std::vector & embd_w, - size_t & mem_per_token) { - const int N = embd_inp.size(); - - const auto & hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_ctx = hparams.n_ctx; - const int n_head = hparams.n_head; - const int n_vocab = hparams.n_vocab; - - static size_t buf_size = 512u*1024*1024; - static void * buf = malloc(buf_size); - - if (mem_per_token > 0 && mem_per_token*N > buf_size) { - const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml 
object overhead - //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new); - - // reallocate - buf_size = buf_size_new; - buf = realloc(buf, buf_size); - if (buf == nullptr) { - fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size); - return false; - } - } - - struct ggml_init_params params = { - /*.mem_size =*/ buf_size, - /*.mem_buffer =*/ buf, - /*.no_alloc =*/ false, - }; - - struct ggml_context * ctx0 = ggml_init(params); - struct ggml_cgraph gf = {}; - - struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd)); - - struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - for (int i = 0; i < N; ++i) { - ((int32_t *) position->data)[i] = n_past + i; - } - - // wte + wpe - struct ggml_tensor * inpL = - ggml_add(ctx0, - ggml_get_rows(ctx0, model.wte, embd), - ggml_get_rows(ctx0, model.wpe, position)); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * cur; - - // norm - { - // [ 768, N] - cur = ggml_norm(ctx0, inpL, 1e-5f); - - // cur = ln_1_g*cur + ln_1_b - // [ 768, N] - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_1_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_1_b, cur)); - } - - // attn - // [2304, 768] - model.layers[il].c_attn_attn_w - // [2304, 1] - model.layers[il].c_attn_attn_b - // [ 768, N] - cur (in) - // [2304, N] - cur (out) - // - // cur = attn_w*cur + attn_b - // [2304, N] - { - cur = ggml_mul_mat(ctx0, - model.layers[il].c_attn_attn_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur), - cur); - } - - // self-attention - { - struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd); - struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd); - struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, 
cur->nb[1], 2*sizeof(float)*n_embd); - - // store key and value to memory - if (N >= 1) { - struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); - struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past)); - - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); - ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); - } - - // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) - // [64, N, 12] - struct ggml_tensor * Q = - ggml_permute(ctx0, - ggml_cpy(ctx0, - Qcur, - ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)), - 0, 2, 1, 3); - - // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) - // [64, n_past + N, 12] - struct ggml_tensor * K = - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), - n_embd/n_head, n_head, n_past + N), - 0, 2, 1, 3); - - // GG: flash attention - //struct ggml_tensor * V = - // ggml_cpy(ctx0, - // ggml_permute(ctx0, - // ggml_reshape_3d(ctx0, - // ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - // n_embd/n_head, n_head, n_past + N), - // 1, 2, 0, 3), - // ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head)); - - //struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true); - - // K * Q - // [n_past + N, N, 12] - struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); - - // KQ_scaled = KQ / sqrt(n_embd/n_head) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_scaled = - ggml_scale(ctx0, - KQ, - 1.0f/sqrt(float(n_embd)/n_head)); - - // KQ_masked = mask_past(KQ_scaled) - // [n_past + N, N, 12] - struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); - - // KQ = soft_max(KQ_masked) - // [n_past + N, N, 12] - struct 
ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); - - // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() - // [n_past + N, 64, 12] - struct ggml_tensor * V_trans = - ggml_cpy(ctx0, - ggml_permute(ctx0, - ggml_reshape_3d(ctx0, - ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd), - n_embd/n_head, n_head, n_past + N), - 1, 2, 0, 3), - ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head)); - - // KQV = transpose(V) * KQ_soft_max - // [64, N, 12] - struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); - - // KQV_merged = KQV.permute(0, 2, 1, 3) - // [64, 12, N] - struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - - // cur = KQV_merged.contiguous().view(n_embd, N) - // [768, N] - cur = ggml_cpy(ctx0, - KQV_merged, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); - } - - // projection - // [ 768, 768] - model.layers[il].c_attn_proj_w - // [ 768, 1] - model.layers[il].c_attn_proj_b - // [ 768, N] - cur (in) - // [ 768, N] - cur (out) - // - // cur = proj_w*cur + proj_b - // [768, N] - { - cur = ggml_mul_mat(ctx0, - model.layers[il].c_attn_proj_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), - cur); - } - - // add the input - cur = ggml_add(ctx0, cur, inpL); - - struct ggml_tensor * inpFF = cur; - - // feed-forward network - { - // norm - { - cur = ggml_norm(ctx0, inpFF, 1e-5f); - - // cur = ln_2_g*cur + ln_2_b - // [ 768, N] - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_2_g, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_2_b, cur)); - } - - // fully connected - // [3072, 768] - model.layers[il].c_mlp_fc_w - // [3072, 1] - model.layers[il].c_mlp_fc_b - // [ 768, N] - cur (in) - // [3072, N] - cur (out) - // - // cur = fc_w*cur + fc_b - // [3072, N] - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_fc_w, - cur); 
- - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), - cur); - - // GELU activation - // [3072, N] - cur = ggml_gelu(ctx0, cur); - - // projection - // [ 768, 3072] - model.layers[il].c_mlp_proj_w - // [ 768, 1] - model.layers[il].c_mlp_proj_b - // [3072, N] - cur (in) - // [ 768, N] - cur (out) - // - // cur = proj_w*cur + proj_b - // [768, N] - cur = ggml_mul_mat(ctx0, - model.layers[il].c_mlp_proj_w, - cur); - - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), - cur); - } - - // input for next layer - inpL = ggml_add(ctx0, cur, inpFF); - } - - // norm - { - // [ 768, N] - inpL = ggml_norm(ctx0, inpL, 1e-5f); - - // inpL = ln_f_g*inpL + ln_f_b - // [ 768, N] - inpL = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.ln_f_g, inpL), - inpL), - ggml_repeat(ctx0, model.ln_f_b, inpL)); - } - - // inpL = WTE * inpL - // [ 768, 50257] - model.lm_head - // [ 768, N] - inpL - inpL = ggml_mul_mat(ctx0, model.lm_head, inpL); - - // logits -> probs - //inpL = ggml_soft_max(ctx0, inpL); - - // run the computation - ggml_build_forward_expand (&gf, inpL); - ggml_graph_compute_with_ctx(ctx0, &gf, n_threads); - - //if (n_past%100 == 0) { - // ggml_graph_print (&gf); - // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot"); - //} - - //embd_w.resize(n_vocab*N); - //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); - - // return result just for the last token - embd_w.resize(n_vocab); - memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); - - if (mem_per_token == 0) { - mem_per_token = ggml_used_mem(ctx0)/N; - } - //printf("used_mem = %zu\n", ggml_used_mem(ctx0)); - - ggml_free(ctx0); - - return true; -} - -/////////////////////////////// GPT-2 END //////////////////////////////// - -constexpr int N_THREAD = 8; - -struct gpt2_context { - std::string prompt_base = R"(Hello, how are you? -I'm fine, thanks. How are you? -Thanks, I'm fine too. What are you doing? 
-I'm just sitting here. -It's a lovely day, isn't it? -Yes, it is. I love the weather this time of year. -I wish it would rain a little bit. -Me too. -)"; - - std::mt19937 rng; - - gpt_vocab vocab; - gpt2_model model; - - int32_t n_threads = std::min(N_THREAD, (int) std::thread::hardware_concurrency()); - - // sampling parameters - int32_t top_k = 5; - float top_p = 0.9f; - float temp = 1.0f; -}; - -struct gpt2_context * gpt2_init(const char * path_model) { - gpt2_context * ctx = new gpt2_context; - - ctx->rng = std::mt19937(time(nullptr)); - - // load the model - { - const int64_t t_start_us = ggml_time_us(); - - if (!gpt2_model_load(path_model, ctx->model, ctx->vocab)) { - fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, path_model); - delete ctx; - return nullptr; - } - - const int64_t t_load_us = ggml_time_us() - t_start_us; - - printf("gpt-2: model loaded in %d ms\n", (int) (t_load_us/1000)); - } - - return ctx; -} - -void gpt2_free(struct gpt2_context * ctx) { - delete ctx; -} - -const char * gpt2_get_prompt(struct gpt2_context * ctx) { - return ctx->prompt_base.c_str(); -} - -void gpt2_set_prompt(struct gpt2_context * ctx, const char * prompt) { - ctx->prompt_base = prompt; -} - -std::vector gpt2_tokenize(const gpt2_context * ctx, const char * text) { - return ::gpt_tokenize(ctx->vocab, text); -} - -std::string gpt2_gen_text(gpt2_context * ctx, const char * text, int max_tokens) { - int n_past = 0; - - std::vector embd_w; - - // tokenize the prompt - std::vector embd_inp = ::gpt2_tokenize(ctx, text); - - int n_predict = std::min(max_tokens, ctx->model.hparams.n_ctx - (int) embd_inp.size()); - - std::vector embd = embd_inp; - - size_t mem_per_token = 3000000; - - std::string result; - - for (int i = embd.size(); i < (int) embd_inp.size() + n_predict; i++) { - // predict - if (!embd.empty()) { - if (!gpt2_eval(ctx->model, ctx->n_threads, n_past, embd, embd_w, mem_per_token)) { - printf("gpt-2: failed to generate text\n"); - return ""; - } - } 
- - n_past += embd.size(); - embd.clear(); - - { - // sample next token - const int top_k = ctx->top_k; - const float top_p = ctx->top_p; - const float temp = ctx->temp; - - const int n_vocab = ctx->model.hparams.n_vocab; - - const gpt_vocab::id id = gpt_sample_top_k_top_p(ctx->vocab, embd_w.data() + (embd_w.size() - n_vocab), top_k, top_p, temp, ctx->rng); - - // add it to the context - embd.push_back(id); - } - - result += ctx->vocab.id_to_token[embd[0]]; - - // end of text token - if (embd.back() == 50256) { - break; - } - } - - return result; -} diff --git a/examples/talk/gpt-2.h b/examples/talk/gpt-2.h deleted file mode 100644 index 756fbfa9..00000000 --- a/examples/talk/gpt-2.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -// TODO: Change to C-style API and move to ./examples for easy reuse. - -#include "common.h" - -#include -#include -#include - -struct gpt2_context; - -struct gpt2_context * gpt2_init(const char * path_model); -void gpt2_free(struct gpt2_context * ctx); - -const char * gpt2_get_prompt(struct gpt2_context * ctx); -void gpt2_set_prompt(struct gpt2_context * ctx, const char * prompt); - -std::vector gpt2_tokenize(const gpt2_context * ctx, const char * text); - -std::string gpt2_gen_text(gpt2_context * ctx, const char * text, int max_tokens); diff --git a/examples/talk/speak b/examples/talk/speak deleted file mode 100644 index 31ea417a..00000000 --- a/examples/talk/speak +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# Usage: -# speak - -function installed() { command -v $1 >/dev/null 2>&1; } - -if installed espeak; then - espeak -v en-us+m$1 -s 225 -p 50 -a 200 -g 5 -k 5 -f $2 - -elif installed piper && installed aplay; then - cat $2 | piper --model ~/en_US-lessac-medium.onnx --output-raw | aplay -q -r 22050 -f S16_LE -t raw - - -# for Mac -elif installed say; then - say -f $2 - -# Eleven Labs -elif installed python3 && \ - python3 -c 'import importlib.util; exit(not importlib.util.find_spec("elevenlabs"))' && \ - installed ffplay; then - # 
It's possible to use the API for free with limited number of characters. - # To increase this limit register to https://beta.elevenlabs.io to get an api key - # and paste it after 'ELEVEN_API_KEY=' - # Keep the line commented to use the free version without api key - #export ELEVEN_API_KEY=your_api_key - wd=$(dirname $0) - script=$wd/eleven-labs.py - python3 $script -q -p -v $1 $2 >/dev/null 2>&1 - - # Uncomment to keep the audio file - #python3 $script -q -s ./audio.mp3 -v $1 $2 >/dev/null 2>&1 - #ffplay -autoexit -nodisp -loglevel quiet -hide_banner -i ./audio.mp3 >/dev/null 2>&1 - -else - echo 'Install espeak ("brew install espeak" or "apt-get install espeak"),' - echo 'piper ("pip install piper-tts" or https://github.com/rhasspy/piper) with aplay,' - echo 'or elevenlabs ("pip install elevenlabs") with ffplay.' - echo '(export ELEVEN_API_KEY if you have an api key from https://beta.elevenlabs.io)' -fi diff --git a/examples/talk/speak.bat b/examples/talk/speak.bat deleted file mode 100644 index d719d690..00000000 --- a/examples/talk/speak.bat +++ /dev/null @@ -1 +0,0 @@ -@powershell -ExecutionPolicy Bypass -F examples\talk\speak.ps1 %1 %2 diff --git a/examples/talk/speak.ps1 b/examples/talk/speak.ps1 deleted file mode 100644 index 51139586..00000000 --- a/examples/talk/speak.ps1 +++ /dev/null @@ -1,14 +0,0 @@ -# Set-ExecutionPolicy -ExecutionPolicy Bypass -Scope CurrentUser -param( - [Parameter(Mandatory=$true)][int]$voicenum, - [Parameter(Mandatory=$true)][string]$textfile -) - -Add-Type -AssemblyName System.Speech; -$speak = New-Object System.Speech.Synthesis.SpeechSynthesizer; -$voiceoptions = $speak.GetInstalledVoices("en-US"); -$voice = $voiceoptions[$voicenum % $voiceoptions.count]; -$speak.SelectVoice($voice.VoiceInfo.Name); -$speak.Rate="0"; -$text = Get-Content -Path $textfile; -$speak.Speak($text); diff --git a/examples/talk/talk.cpp b/examples/talk/talk.cpp deleted file mode 100644 index 428f38b7..00000000 --- a/examples/talk/talk.cpp +++ /dev/null @@ 
-1,376 +0,0 @@ -// Talk with AI -// - -#include "common-sdl.h" -#include "common.h" -#include "whisper.h" -#include "gpt-2.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -// command-line parameters -struct whisper_params { - int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); - int32_t voice_ms = 10000; - int32_t capture_id = -1; - int32_t max_tokens = 32; - int32_t audio_ctx = 0; - - float vad_thold = 0.6f; - float freq_thold = 100.0f; - - bool translate = false; - bool print_special = false; - bool print_energy = false; - bool no_timestamps = true; - bool use_gpu = true; - bool flash_attn = false; - - std::string person = "Santa"; - std::string language = "en"; - std::string model_wsp = "models/ggml-base.en.bin"; - std::string model_gpt = "models/ggml-gpt-2-117M.bin"; - std::string speak = "./examples/talk/speak"; - std::string speak_file= "./examples/talk/to_speak.txt"; - std::string fname_out; -}; - -void whisper_print_usage(int argc, char ** argv, const whisper_params & params); - -static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) { - for (int i = 1; i < argc; i++) { - std::string arg = argv[i]; - - if (arg == "-h" || arg == "--help") { - whisper_print_usage(argc, argv, params); - exit(0); - } - else if (arg == "-t" || arg == "--threads") { params.n_threads = std::stoi(argv[++i]); } - else if (arg == "-vms" || arg == "--voice-ms") { params.voice_ms = std::stoi(argv[++i]); } - else if (arg == "-c" || arg == "--capture") { params.capture_id = std::stoi(argv[++i]); } - else if (arg == "-mt" || arg == "--max-tokens") { params.max_tokens = std::stoi(argv[++i]); } - else if (arg == "-ac" || arg == "--audio-ctx") { params.audio_ctx = std::stoi(argv[++i]); } - else if (arg == "-vth" || arg == "--vad-thold") { params.vad_thold = std::stof(argv[++i]); } - else if (arg == "-fth" || arg == "--freq-thold") { params.freq_thold = std::stof(argv[++i]); } - else if (arg == "-tr" 
|| arg == "--translate") { params.translate = true; } - else if (arg == "-ps" || arg == "--print-special") { params.print_special = true; } - else if (arg == "-pe" || arg == "--print-energy") { params.print_energy = true; } - else if (arg == "-ng" || arg == "--no-gpu") { params.use_gpu = false; } - else if (arg == "-fa" || arg == "--flash-attn") { params.flash_attn = true; } - else if (arg == "-p" || arg == "--person") { params.person = argv[++i]; } - else if (arg == "-l" || arg == "--language") { params.language = argv[++i]; } - else if (arg == "-mw" || arg == "--model-whisper") { params.model_wsp = argv[++i]; } - else if (arg == "-mg" || arg == "--model-gpt") { params.model_gpt = argv[++i]; } - else if (arg == "-s" || arg == "--speak") { params.speak = argv[++i]; } - else if (arg == "-sf" || arg == "--speak_file") { params.speak_file = argv[++i]; } - else if (arg == "-f" || arg == "--file") { params.fname_out = argv[++i]; } - else { - fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); - whisper_print_usage(argc, argv, params); - exit(0); - } - } - - return true; -} - -void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) { - fprintf(stderr, "\n"); - fprintf(stderr, "usage: %s [options]\n", argv[0]); - fprintf(stderr, "\n"); - fprintf(stderr, "options:\n"); - fprintf(stderr, " -h, --help [default] show this help message and exit\n"); - fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads); - fprintf(stderr, " -vms N, --voice-ms N [%-7d] voice duration in milliseconds\n", params.voice_ms); - fprintf(stderr, " -c ID, --capture ID [%-7d] capture device ID\n", params.capture_id); - fprintf(stderr, " -mt N, --max-tokens N [%-7d] maximum number of tokens per audio chunk\n", params.max_tokens); - fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx); - fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection 
threshold\n", params.vad_thold); - fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold); - fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false"); - fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false"); - fprintf(stderr, " -pe, --print-energy [%-7s] print sound energy (for debugging)\n", params.print_energy ? "true" : "false"); - fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU\n", params.use_gpu ? "false" : "true"); - fprintf(stderr, " -fa, --flash-attn [%-7s] flash attention\n", params.flash_attn ? "true" : "false"); - fprintf(stderr, " -p NAME, --person NAME [%-7s] person name (for prompt selection)\n", params.person.c_str()); - fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str()); - fprintf(stderr, " -mw FILE, --model-whisper [%-7s] whisper model file\n", params.model_wsp.c_str()); - fprintf(stderr, " -mg FILE, --model-gpt [%-7s] gpt model file\n", params.model_gpt.c_str()); - fprintf(stderr, " -s FILE, --speak TEXT [%-7s] command for TTS\n", params.speak.c_str()); - fprintf(stderr, " -sf FILE, --speak_file [%-7s] file to pass to TTS\n", params.speak_file.c_str()); - fprintf(stderr, " -f FNAME, --file FNAME [%-7s] text output file name\n", params.fname_out.c_str()); - fprintf(stderr, "\n"); -} - -static std::string transcribe(whisper_context * ctx, const whisper_params & params, const std::vector & pcmf32, float & prob, int64_t & t_ms) { - const auto t_start = std::chrono::high_resolution_clock::now(); - - prob = 0.0f; - t_ms = 0; - - whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY); - - wparams.print_progress = false; - wparams.print_special = params.print_special; - wparams.print_realtime = false; - wparams.print_timestamps = !params.no_timestamps; - wparams.translate = params.translate; - wparams.no_context = 
true; - wparams.single_segment = true; - wparams.max_tokens = params.max_tokens; - wparams.language = params.language.c_str(); - wparams.n_threads = params.n_threads; - - wparams.audio_ctx = params.audio_ctx; - - if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) { - return ""; - } - - int prob_n = 0; - std::string result; - - const int n_segments = whisper_full_n_segments(ctx); - for (int i = 0; i < n_segments; ++i) { - const char * text = whisper_full_get_segment_text(ctx, i); - - result += text; - - const int n_tokens = whisper_full_n_tokens(ctx, i); - for (int j = 0; j < n_tokens; ++j) { - const auto token = whisper_full_get_token_data(ctx, i, j); - - prob += token.p; - ++prob_n; - } - } - - if (prob_n > 0) { - prob /= prob_n; - } - - const auto t_end = std::chrono::high_resolution_clock::now(); - t_ms = std::chrono::duration_cast(t_end - t_start).count(); - - return result; -} - -const std::string k_prompt = -R"(This is a dialogue between {0} (A) and a person (B). The dialogue so far is: - -B: Hello {0}, how are you? -A: I'm fine, thank you. 
-{1} -Here is how {0} (A) continues the dialogue: - -A:)"; - -int main(int argc, char ** argv) { - whisper_params params; - - if (whisper_params_parse(argc, argv, params) == false) { - return 1; - } - - if (whisper_lang_id(params.language.c_str()) == -1) { - fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str()); - whisper_print_usage(argc, argv, params); - exit(0); - } - - // whisper init - struct whisper_context_params cparams = whisper_context_default_params(); - - cparams.use_gpu = params.use_gpu; - cparams.flash_attn = params.flash_attn; - - struct whisper_context * ctx_wsp = whisper_init_from_file_with_params(params.model_wsp.c_str(), cparams); - - // gpt init - - struct gpt2_context * ctx_gpt = gpt2_init(params.model_gpt.c_str()); - - // print some info about the processing - { - fprintf(stderr, "\n"); - if (!whisper_is_multilingual(ctx_wsp)) { - if (params.language != "en" || params.translate) { - params.language = "en"; - params.translate = false; - fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__); - } - } - fprintf(stderr, "%s: processing, %d threads, lang = %s, task = %s, timestamps = %d ...\n", - __func__, - params.n_threads, - params.language.c_str(), - params.translate ? "translate" : "transcribe", - params.no_timestamps ? 
0 : 1); - - fprintf(stderr, "\n"); - } - - - // init audio - - audio_async audio(30*1000); - if (!audio.init(params.capture_id, WHISPER_SAMPLE_RATE)) { - fprintf(stderr, "%s: audio.init() failed!\n", __func__); - return 1; - } - - audio.resume(); - - int n_iter = 0; - - bool is_running = true; - bool force_speak = false; - - float prob0 = 0.0f; - - std::vector pcmf32_cur; - std::vector pcmf32_prompt; - - gpt2_set_prompt(ctx_gpt, ""); - - const int voice_id = rand()%6; - - fprintf(stderr, "gpt-2: prompt:\n"); - fprintf(stderr, "========================\n\n"); - fprintf(stderr, "%s\n", ::replace(k_prompt, "{0}", params.person).c_str()); - fprintf(stderr, "========================\n\n"); - - // main loop - while (is_running) { - // handle Ctrl + C - is_running = sdl_poll_events(); - - if (!is_running) { - break; - } - - // delay - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - - int64_t t_ms = 0; - - { - audio.get(2000, pcmf32_cur); - - if (::vad_simple(pcmf32_cur, WHISPER_SAMPLE_RATE, 1250, params.vad_thold, params.freq_thold, params.print_energy) || force_speak) { - fprintf(stdout, "%s: Speech detected! 
Processing ...\n", __func__); - - audio.get(params.voice_ms, pcmf32_cur); - - std::string text_heard; - - if (!force_speak) { - text_heard = ::trim(::transcribe(ctx_wsp, params, pcmf32_cur, prob0, t_ms)); - } - - // remove text between brackets using regex - { - std::regex re("\\[.*?\\]"); - text_heard = std::regex_replace(text_heard, re, ""); - } - - // remove text between brackets using regex - { - std::regex re("\\(.*?\\)"); - text_heard = std::regex_replace(text_heard, re, ""); - } - - // remove all characters, except for letters, numbers, punctuation and ':', '\'', '-', ' ' - text_heard = std::regex_replace(text_heard, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), ""); - - // take first line - text_heard = text_heard.substr(0, text_heard.find_first_of('\n')); - - // remove leading and trailing whitespace - text_heard = std::regex_replace(text_heard, std::regex("^\\s+"), ""); - text_heard = std::regex_replace(text_heard, std::regex("\\s+$"), ""); - - const std::vector tokens = gpt2_tokenize(ctx_gpt, text_heard.c_str()); - - if (text_heard.empty() || tokens.empty() || force_speak) { - fprintf(stdout, "%s: Heard nothing, skipping ...\n", __func__); - audio.clear(); - - continue; - } - - force_speak = false; - - fprintf(stdout, "%s: Heard '%s%s%s', (t = %d ms)\n", __func__, "\033[1m", text_heard.c_str(), "\033[0m", (int) t_ms); - - std::string prompt_base = gpt2_get_prompt(ctx_gpt); - - std::string text_to_speak; - - { - prompt_base += "B: " + text_heard + "\n"; - - std::string prompt = ::replace(::replace(k_prompt, "{0}", params.person), "{1}", prompt_base); - - text_to_speak = gpt2_gen_text(ctx_gpt, prompt.c_str(), params.max_tokens); - //text_to_speak = std::regex_replace(text_to_speak, std::regex("[^a-zA-Z0-9\\.,\\?!\\s\\:\\'\\-]"), ""); - text_to_speak = text_to_speak.substr(0, text_to_speak.find_first_of('\n')); - - // remove first 2 lines of base prompt - if (n_iter > 4) { - { - const size_t pos = prompt_base.find_first_of('\n'); - if (pos != 
std::string::npos) { - prompt_base = prompt_base.substr(pos + 1); - } - } - { - const size_t pos = prompt_base.find_first_of('\n'); - if (pos != std::string::npos) { - prompt_base = prompt_base.substr(pos + 1); - } - } - } - - prompt_base += "A:" + text_to_speak + "\n"; - - { - prompt = ::replace(::replace(k_prompt, "{0}", params.person), "{1}", prompt_base); - - printf("===============\n"); - printf("prompt:\n"); - printf("%s\n", prompt.c_str()); - printf("===============\n"); - } - } - - //printf("========================\n"); - //printf("gpt-2: prompt_base:\n%s\n", prompt_base.c_str()); - //printf("========================\n"); - - gpt2_set_prompt(ctx_gpt, prompt_base.c_str()); - - text_to_speak = ::replace(text_to_speak, params.person + ": ", ""); - speak_with_file(params.speak, text_to_speak, params.speak_file, voice_id); - - audio.clear(); - - ++n_iter; - } - } - } - - audio.pause(); - - whisper_print_timings(ctx_wsp); - whisper_free(ctx_wsp); - - return 0; -} diff --git a/examples/twitch.sh b/examples/twitch.sh index 1cd81428..73b9fe35 100755 --- a/examples/twitch.sh +++ b/examples/twitch.sh @@ -29,7 +29,7 @@ help() check_requirements() { - if ! command -v ./main &>/dev/null; then + if ! command -v ./build/bin/whisper-cli &>/dev/null; then echo "whisper.cpp main executable is required (make)" exit 1 fi @@ -100,7 +100,7 @@ do err=$(cat /tmp/whisper-live.err | wc -l) done - ./main -t $threads -m ./models/ggml-$model.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1 + ./build/bin/whisper-cli -t $threads -m ./models/ggml-$model.bin -f /tmp/whisper-live.wav --no-timestamps -otxt 2> /tmp/whispererr | tail -n 1 while [ $SECONDS -lt $((($i+1)*$step)) ]; do sleep 1 diff --git a/examples/yt-wsp.sh b/examples/yt-wsp.sh index 8e5bbcf5..fb9f2a81 100755 --- a/examples/yt-wsp.sh +++ b/examples/yt-wsp.sh @@ -55,7 +55,7 @@ MODEL_PATH="${MODEL_PATH:-${SCRIPT_DIR}/../models/ggml-base.en.bin}" # Where to find the whisper.cpp executable. 
default to the examples directory # which holds this script in source control ################################################################################ -WHISPER_EXECUTABLE="${WHISPER_EXECUTABLE:-${SCRIPT_DIR}/../main}"; +WHISPER_EXECUTABLE="${WHISPER_EXECUTABLE:-${SCRIPT_DIR}/../build/bin/whisper-cli}"; # Set to desired language to be translated into english WHISPER_LANG="${WHISPER_LANG:-en}"; diff --git a/scripts/bench-all.sh b/scripts/bench-all.sh index c7dc52b1..01d6143c 100755 --- a/scripts/bench-all.sh +++ b/scripts/bench-all.sh @@ -38,13 +38,13 @@ if [ "$encoder_only" -eq 0 ]; then printf "Running memcpy benchmark\n" printf "\n" - ./build/bin/bench -w 1 -t $n_threads 2>&1 + ./build/bin/whisper-bench -w 1 -t $n_threads 2>&1 printf "\n" printf "Running ggml_mul_mat benchmark with $n_threads threads\n" printf "\n" - ./build/bin/bench -w 2 -t $n_threads 2>&1 + ./build/bin/whisper-bench -w 2 -t $n_threads 2>&1 printf "\n" printf "Running benchmark for all models\n" @@ -64,7 +64,7 @@ printf "| %6s | %6s | %16s | %13s | %3s | %3s | %7s | %7s | %7s | %7s | %7s |\n" for model in "${models[@]}"; do # actual run # store stderr output in a variable in order to parse it later - output=$(./build/bin/bench -m ./models/ggml-$model.bin -t $n_threads $fattn 2>&1) + output=$(./build/bin/whisper-bench -m ./models/ggml-$model.bin -t $n_threads $fattn 2>&1) ret=$? # parse the output: diff --git a/scripts/bench-wts.sh b/scripts/bench-wts.sh index 223d71b8..7ddf1dce 100755 --- a/scripts/bench-wts.sh +++ b/scripts/bench-wts.sh @@ -22,7 +22,7 @@ echo "Input file duration: ${DURATION}s" for model in $models; do echo "Running $model" - COMMAND="./main -m models/ggml-$model.bin -owts -f $1 -of $1.$model" + COMMAND="./build/bin/whisper-cli -m models/ggml-$model.bin -owts -f $1 -of $1.$model" if [ ! 
-z "$2" ]; then COMMAND="$COMMAND -fp $2" diff --git a/scripts/bench.py b/scripts/bench.py index 143f4fba..98d1c2bc 100644 --- a/scripts/bench.py +++ b/scripts/bench.py @@ -148,7 +148,7 @@ for model in filtered_models: for thread in threads: for processor_count in processors: # Construct the command to run - cmd = f"./main -m models/{model} -t {thread} -p {processor_count} -f {sample_file}" + cmd = f"./build/bin/whisper-cli -m models/{model} -t {thread} -p {processor_count} -f {sample_file}" # Run the command and get the output process = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT diff --git a/scripts/quantize-all.sh b/scripts/quantize-all.sh index 767462b8..e4cf61ee 100755 --- a/scripts/quantize-all.sh +++ b/scripts/quantize-all.sh @@ -19,8 +19,8 @@ for i in `ls ./models | grep ^ggml-.*.bin | grep -v "\-q"`; do m="models/$i" if [ -f "$m" ]; then if [ "${m##*.}" == "bin" ]; then - ./quantize "${m}" "${m::${#m}-4}-${qtype1}.bin" ${qtype1}; - ./quantize "${m}" "${m::${#m}-4}-${qtype0}.bin" ${qtype0}; + ./build/bin/whisper-quantize "${m}" "${m::${#m}-4}-${qtype1}.bin" ${qtype1}; + ./build/bin/whisper-quantize "${m}" "${m::${#m}-4}-${qtype0}.bin" ${qtype0}; filedex+=( "${m::${#m}-4}-${qtype1}.bin" "${m::${#m}-4}-${qtype0}.bin" ) fi fi diff --git a/tests/run-tests.sh b/tests/run-tests.sh index bb8aa5e2..ad2b8d3e 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -39,7 +39,7 @@ if [ $# -eq 0 ]; then fi model=$1 -main="../build/bin/main" +main="../build/bin/whisper-cli" threads="" if [ $# -eq 2 ]; then