diff --git a/examples/whisper.wasm/README.md b/examples/whisper.wasm/README.md
index e7429f35..f228a8b6 100644
--- a/examples/whisper.wasm/README.md
+++ b/examples/whisper.wasm/README.md
@@ -37,6 +37,7 @@
 emcmake cmake ..
 make -j
 
 # copy the produced page to your HTTP path
-cp bin/whisper.wasm/* /path/to/html/
+cp bin/whisper.wasm/*    /path/to/html/
+cp bin/libmain.worker.js /path/to/html/
 ```
diff --git a/whisper.cpp b/whisper.cpp
index b08bf0fa..cca949fa 100644
--- a/whisper.cpp
+++ b/whisper.cpp
@@ -284,11 +284,11 @@ static const std::map<ggml_type, std::map<e_model, size_t>> MEM_REQ_MODEL = {
     },
     { GGML_TYPE_Q4_1,
         {
-            { MODEL_TINY,     31ull*MB },
-            { MODEL_BASE,     57ull*MB },
-            { MODEL_SMALL,   181ull*MB },
-            { MODEL_MEDIUM,  559ull*MB },
-            { MODEL_LARGE,  1122ull*MB },
+            { MODEL_TINY,     32ull*MB },
+            { MODEL_BASE,     58ull*MB },
+            { MODEL_SMALL,   182ull*MB },
+            { MODEL_MEDIUM,  562ull*MB },
+            { MODEL_LARGE,  1124ull*MB },
         },
     },
     { GGML_TYPE_Q4_2,
@@ -300,22 +300,31 @@ static const std::map<ggml_type, std::map<e_model, size_t>> MEM_REQ_MODEL = {
             { MODEL_LARGE,   940ull*MB },
         },
     },
-    { GGML_TYPE_Q5_0, // TODO: fix
+    { GGML_TYPE_Q5_0,
         {
-            { MODEL_TINY,     31ull*MB },
-            { MODEL_BASE,     57ull*MB },
-            { MODEL_SMALL,   181ull*MB },
-            { MODEL_MEDIUM,  559ull*MB },
-            { MODEL_LARGE,  1122ull*MB },
+            { MODEL_TINY,     30ull*MB },
+            { MODEL_BASE,     54ull*MB },
+            { MODEL_SMALL,   170ull*MB },
+            { MODEL_MEDIUM,  516ull*MB },
+            { MODEL_LARGE,  1034ull*MB },
         },
     },
     { GGML_TYPE_Q5_1,
         {
-            { MODEL_TINY,     31ull*MB },
-            { MODEL_BASE,     57ull*MB },
-            { MODEL_SMALL,   181ull*MB },
-            { MODEL_MEDIUM,  559ull*MB },
-            { MODEL_LARGE,  1122ull*MB },
+            { MODEL_TINY,     32ull*MB },
+            { MODEL_BASE,     58ull*MB },
+            { MODEL_SMALL,   182ull*MB },
+            { MODEL_MEDIUM,  562ull*MB },
+            { MODEL_LARGE,  1124ull*MB },
+        },
+    },
+    { GGML_TYPE_Q8_0,
+        {
+            { MODEL_TINY,     45ull*MB },
+            { MODEL_BASE,     84ull*MB },
+            { MODEL_SMALL,   268ull*MB },
+            { MODEL_MEDIUM,  834ull*MB },
+            { MODEL_LARGE,  1674ull*MB },
         },
     },
 };