mirror of https://github.com/ggerganov/whisper.cpp.git

commit 794b162a46

* whisper : add integer quantization support
* examples : add common-ggml + prepare to add "quantize" tool
* whisper : quantization tool ready
* whisper : fix F32 support
* whisper : try to fix shared lib linkage
* wasm : update quantized models to Q5
* bench.wasm : remove "medium" button
* bench.wasm : fix custom model button
* ggml : add Q5_0 and Q5_1 WASM SIMD
* wasm : add quantized models to all WASM examples
* wasm : bump DB version number to 2
* talk-llama : update example to latest llama.cpp
* node : increase test timeout to 10s
* readme : add information for model quantization
* wasm : add links to other examples
115 lines · 3.5 KiB · C++
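
Below is the file at this commit: the Emscripten bindings that export whisper.cpp's init / free / full_default functions to JavaScript for the WASM examples.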
#include "whisper.h"
|
|
|
|
#include <emscripten.h>
|
|
#include <emscripten/bind.h>
|
|
|
|
#include <vector>
|
|
#include <thread>
|
|
|
|
std::thread g_worker;
|
|
|
|
std::vector<struct whisper_context *> g_contexts(4, nullptr);
|
|
|
|
static inline int mpow2(int n) {
|
|
int p = 1;
|
|
while (p <= n) p *= 2;
|
|
return p/2;
|
|
}
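
//
// JS-visible API, exported below via embind:
//
//   init(path_model)   -> 1-based context id, 0 on failure
//   free(index)        -> releases the context with that id
//   full_default(index, audio, lang, nthreads, translate)
//                      -> 0 on success, negative on a bad id or missing context
//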

EMSCRIPTEN_BINDINGS(whisper) {
    emscripten::function("init", emscripten::optional_override([](const std::string & path_model) {
        // wait for any in-flight transcription before touching the context pool
        if (g_worker.joinable()) {
            g_worker.join();
        }

        // take the first free slot; ids handed to JS are 1-based so that 0 can signal failure
        for (size_t i = 0; i < g_contexts.size(); ++i) {
            if (g_contexts[i] == nullptr) {
                g_contexts[i] = whisper_init_from_file(path_model.c_str());
                if (g_contexts[i] != nullptr) {
                    return i + 1;
                } else {
                    return (size_t) 0;
                }
            }
        }

        return (size_t) 0;
    }));

    emscripten::function("free", emscripten::optional_override([](size_t index) {
        if (g_worker.joinable()) {
            g_worker.join();
        }

        // convert the 1-based id to a slot index; an invalid id of 0 wraps
        // around to SIZE_MAX and is rejected by the bounds check below
        --index;

        if (index < g_contexts.size()) {
            whisper_free(g_contexts[index]);
            g_contexts[index] = nullptr;
        }
    }));

    emscripten::function("full_default", emscripten::optional_override([](size_t index, const emscripten::val & audio, const std::string & lang, int nthreads, bool translate) {
        if (g_worker.joinable()) {
            g_worker.join();
        }

        --index;

        if (index >= g_contexts.size()) {
            return -1;
        }

        if (g_contexts[index] == nullptr) {
            return -2;
        }

        struct whisper_full_params params = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY);

        params.print_realtime   = true;
        params.print_progress   = false;
        params.print_timestamps = true;
        params.print_special    = false;
        params.translate        = translate;
        params.language         = whisper_is_multilingual(g_contexts[index]) ? lang.c_str() : "en";
        // cap the thread count at the requested value, at 16, and at the largest
        // power of two that fits the available hardware concurrency
        params.n_threads        = std::min(nthreads, std::min(16, mpow2(std::thread::hardware_concurrency())));
        params.offset_ms        = 0;

        std::vector<float> pcmf32;
        const int n = audio["length"].as<int>();

        emscripten::val heap   = emscripten::val::module_property("HEAPU8");
        emscripten::val memory = heap["buffer"];

        pcmf32.resize(n);

        // create a JS view of the same type as `audio` over the module heap at
        // pcmf32's storage, then copy the samples in with a single set() call
        emscripten::val memoryView = audio["constructor"].new_(memory, reinterpret_cast<uintptr_t>(pcmf32.data()), n);
        memoryView.call<void>("set", audio);
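
        // Note on the copy above: embind's val gives direct access to JS heap
        // objects, so instead of converting sample-by-sample through val, the
        // code effectively builds `new Float32Array(Module.HEAPU8.buffer, ptr, n)`
        // and lets TypedArray.set() do one bulk copy into pcmf32. This assumes
        // `audio` really is a Float32Array of mono 16 kHz PCM, which is what
        // the JS side of the WASM examples passes in.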

        // print system information
        {
            printf("system_info: n_threads = %d / %d | %s\n",
                    params.n_threads, (int) std::thread::hardware_concurrency(), whisper_print_system_info());

            printf("%s: processing %d samples, %.1f sec, %d threads, %d processors, lang = %s, task = %s ...\n",
                    __func__, int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE,
                    params.n_threads, 1,
                    params.language,
                    params.translate ? "translate" : "transcribe");

            printf("\n");
        }

        // run the worker
        {
            // keep a copy of the language string alive in the worker: `lang` is
            // destroyed when this binding returns, so params.language must not
            // keep pointing into it while the thread is still running
            g_worker = std::thread([index, params, lang = std::string(params.language), pcmf32 = std::move(pcmf32)]() mutable {
                params.language = lang.c_str();

                whisper_reset_timings(g_contexts[index]);
                whisper_full(g_contexts[index], params, pcmf32.data(), pcmf32.size());
                whisper_print_timings(g_contexts[index]);
            });
        }

        // the transcription runs asynchronously; output is emitted via printf
        return 0;
    }));
}
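
// Usage sketch from the JS side (hypothetical variable names; assumes the
// generated module is available as `Module` and the model file has been
// placed in the MEMFS filesystem, as the WASM examples do):
//
//   const id = Module.init('whisper.bin');  // 0 means no free slot / load failure
//   if (id) {
//       // pcm: Float32Array of mono 16 kHz samples
//       Module.full_default(id, pcm, 'en', 8, false);
//       // returns immediately; results are printed from the worker thread
//       // as they are produced (print_realtime = true)
//   }
//   ...
//   Module.free(id);  // joins the worker, then frees the context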