wip : polishing WASM example

This commit is contained in:
Georgi Gerganov 2022-10-22 16:57:54 +03:00
parent db460b78ff
commit 491ecd7056
4 changed files with 398 additions and 86 deletions

View File

@ -21,15 +21,13 @@ if (WHISPER_WASM_SINGLE_FILE)
)
endif()
#-s TOTAL_MEMORY=536870912 \
set_target_properties(${TARGET} PROPERTIES LINK_FLAGS " \
--bind \
-s MODULARIZE=1 \
-s ASSERTIONS=1 \
-s USE_PTHREADS=1 \
-s PTHREAD_POOL_SIZE=9 \
-s ALLOW_MEMORY_GROWTH=1 \
-s PTHREAD_POOL_SIZE=8 \
-s INITIAL_MEMORY=1610612736 \
-s TOTAL_MEMORY=1610612736 \
-s FORCE_FILESYSTEM=1 \
-s EXPORT_NAME=\"'whisper_factory'\" \
-s EXPORTED_RUNTIME_METHODS=\"['print', 'printErr', 'ccall', 'cwrap']\" \
${EXTRA_FLAGS} \
")

View File

@ -13,7 +13,11 @@ EMSCRIPTEN_BINDINGS(whisper) {
for (size_t i = 0; i < g_contexts.size(); ++i) {
if (g_contexts[i] == nullptr) {
g_contexts[i] = whisper_init(path_model.c_str());
return i + 1;
if (g_contexts[i] != nullptr) {
return i + 1;
} else {
return (size_t) 0;
}
}
}
@ -29,7 +33,7 @@ EMSCRIPTEN_BINDINGS(whisper) {
}
}));
emscripten::function("full_default", emscripten::optional_override([](size_t index, const emscripten::val & audio) {
emscripten::function("full_default", emscripten::optional_override([](size_t index, const emscripten::val & audio, const std::string & lang, bool translate) {
--index;
if (index >= g_contexts.size()) {
@ -42,15 +46,20 @@ EMSCRIPTEN_BINDINGS(whisper) {
struct whisper_full_params params = whisper_full_default_params(whisper_sampling_strategy::WHISPER_SAMPLING_GREEDY);
printf("full_default: available threads %d\n", std::thread::hardware_concurrency());
params.print_realtime = true;
params.print_progress = false;
params.print_timestamps = true;
params.print_special_tokens = false;
params.translate = false;
params.language = "en";
params.translate = translate;
params.language = whisper_is_multilingual(g_contexts[index]) ? lang.c_str() : "en";
params.n_threads = std::min(8, (int) std::thread::hardware_concurrency());
params.offset_ms = 0;
printf("full_default: using %d threads\n", params.n_threads);
printf("full_default: language '%s'\n", params.language);
std::vector<float> pcmf32;
const int n = audio["length"].as<int>();

File diff suppressed because one or more lines are too long

View File

@ -2,22 +2,170 @@
<html lang="en-us">
<head>
<title>whisper.cpp : WASM example</title>
<style>
#output {
width: 100%;
height: 100%;
margin: 0 auto;
margin-top: 10px;
border-left: 0px;
border-right: 0px;
padding-left: 0px;
padding-right: 0px;
display: block;
background-color: black;
color: white;
font-size: 10px;
font-family: 'Lucida Console', Monaco, monospace;
outline: none;
white-space: pre;
overflow-wrap: normal;
overflow-x: scroll;
}
</style>
</head>
<body>
<div id="main-container">
Minimal <b>whisper.cpp</b> example using Javascript bindings
<b>Minimal <a href="https://github.com/ggerganov/whisper.cpp">whisper.cpp</a> example running fully in the browser</b>
<br><br>
Model:
<input type="file" id="file" name="file" onchange="loadFile(event, 'ggml.bin')" />
Usage instructions:<br>
<ul>
<li>Load a ggml model file (you can obtain one from <a href="https://ggml.ggerganov.com/">here</a>, recommended: <b>tiny</b> or <b>base</b>)</li>
<li>Select audio file to transcribe or record audio from the microphone (sample: <a href="https://whisper.ggerganov.com/jfk.wav">jfk.wav</a>)</li>
<li>Click on the "Transcribe" button to start the transcription</li>
</ul>
Note that the computation is quite heavy and may take a few seconds to complete.<br>
The transcription results will be displayed in the text area below.<br><br>
<b>Important: your browser must support WASM SIMD instructions for this to work.</b>
<br><br><hr>
<div id="model">
Model:
<input type="file" id="file" name="file" onchange="loadFile(event, 'ggml.bin')" />
</div>
<br>
<!-- radio button to select between file upload or microphone -->
<div id="input">
Input:
<input type="radio" id="file" name="input" value="file" checked="checked" onchange="changeInput('file')" /> File
<input type="radio" id="mic" name="input" value="mic" onchange="changeInput('mic')" /> Microphone
</div>
<br>
<div id="input_file">
Audio file:
<input type="file" id="file" name="file" onchange="loadAudio(event)" />
</div>
<div id="input_mic" style="display: none;">
Microphone:
<button id="start" onclick="startRecording()">Start</button>
<button id="stop" onclick="stopRecording()" disabled>Stop</button>
<!-- progress bar to show recording progress -->
<br><br>
<div id="progress" style="display: none;">
<div id="progress-bar" style="width: 0%; height: 10px; background-color: #4CAF50;"></div>
<div id="progress-text">0%</div>
</div>
</div>
<audio controls="controls" id="audio" loop hidden>
Your browser does not support the &lt;audio&gt; tag.
<source id="source" src="" type="audio/wav" />
</audio>
<hr><br>
<table>
<tr>
<td>
Language:
<select id="language" name="language">
<option value="en">English</option>
<option value="ar">Arabic</option>
<option value="hy">Armenian</option>
<option value="az">Azerbaijani</option>
<option value="eu">Basque</option>
<option value="be">Belarusian</option>
<option value="bn">Bengali</option>
<option value="bg">Bulgarian</option>
<option value="ca">Catalan</option>
<option value="zh">Chinese</option>
<option value="hr">Croatian</option>
<option value="cs">Czech</option>
<option value="da">Danish</option>
<option value="nl">Dutch</option>
<option value="en">English</option>
<option value="et">Estonian</option>
<option value="tl">Filipino</option>
<option value="fi">Finnish</option>
<option value="fr">French</option>
<option value="gl">Galician</option>
<option value="ka">Georgian</option>
<option value="de">German</option>
<option value="el">Greek</option>
<option value="gu">Gujarati</option>
<option value="iw">Hebrew</option>
<option value="hi">Hindi</option>
<option value="hu">Hungarian</option>
<option value="is">Icelandic</option>
<option value="id">Indonesian</option>
<option value="ga">Irish</option>
<option value="it">Italian</option>
<option value="ja">Japanese</option>
<option value="kn">Kannada</option>
<option value="ko">Korean</option>
<option value="la">Latin</option>
<option value="lv">Latvian</option>
<option value="lt">Lithuanian</option>
<option value="mk">Macedonian</option>
<option value="ms">Malay</option>
<option value="mt">Maltese</option>
<option value="no">Norwegian</option>
<option value="fa">Persian</option>
<option value="pl">Polish</option>
<option value="pt">Portuguese</option>
<option value="ro">Romanian</option>
<option value="ru">Russian</option>
<option value="sr">Serbian</option>
<option value="sk">Slovak</option>
<option value="sl">Slovenian</option>
<option value="es">Spanish</option>
<option value="sw">Swahili</option>
<option value="sv">Swedish</option>
<option value="ta">Tamil</option>
<option value="te">Telugu</option>
<option value="th">Thai</option>
<option value="tr">Turkish</option>
<option value="uk">Ukrainian</option>
<option value="ur">Urdu</option>
<option value="vi">Vietnamese</option>
<option value="cy">Welsh</option>
<option value="yi">Yiddish</option>
</select>
</td>
<td>
<button onclick="onProcess(false);">Transcribe</button>
</td>
<td>
<button onclick="onProcess(true);">Translate</button>
</td>
</tr>
</table>
<br><br>
WAV:
<input type="file" id="file" name="file" onchange="loadAudio(event)" />
<br><br>
<button onclick="onTranscribe();">Transcribe</button>
<!-- textarea with height filling the rest of the page -->
<textarea id="output" rows="20"></textarea>
<br><br>
@ -32,8 +180,60 @@
</div>
</div>
<script type="text/javascript" src="whisper.js"></script>
<script type='text/javascript'>
// TODO: convert audio buffer to WAV
// Currently a no-op: the commented-out code below would wrap the raw audio
// buffer in a Blob and feed it to the <audio> element, but per the TODO
// above the buffer first needs converting to 16-bit PCM WAV, so it is
// left disabled.
function setAudio(audio) {
//if (audio) {
// // convert to 16-bit PCM
// var blob = new Blob([audio], { type: 'audio/wav' });
// var url = URL.createObjectURL(blob);
// document.getElementById('source').src = url;
// document.getElementById('audio').hidden = false;
// document.getElementById('audio').loop = false;
// document.getElementById('audio').load();
//} else {
// document.getElementById('audio').hidden = true;
//}
}
// Toggle between the file-upload and microphone input sections.
// input is either 'file' or 'mic' (wired to the radio buttons' onchange).
function changeInput(input) {
    var useFile = (input == 'file');

    document.getElementById('input_file').style.display = useFile ? 'block' : 'none';
    document.getElementById('input_mic').style.display  = useFile ? 'none'  : 'block';
    document.getElementById('progress').style.display   = useFile ? 'none'  : 'block';
}
// Logger used as the Module print/printErr sink: writes each message to the
// #output textarea (auto-scrolling to the bottom) and mirrors it to the
// console. Multiple arguments are joined with spaces, like console.log.
var printTextarea = (function() {
    var element = document.getElementById('output');
    // BUGFIX: was `element.alue = ''`, which set a stray property instead of
    // clearing any stale textarea content the browser restored on reload.
    if (element) element.value = '';
    return function(text) {
        if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
        console.log(text);
        if (element) {
            element.value += text + "\n";
            element.scrollTop = element.scrollHeight; // focus on bottom
        }
    };
})();
// Emscripten Module configuration: routes all stdout/stderr output from the
// WASM module into the #output textarea via printTextarea.
var Module = {
print: printTextarea,
printErr: printTextarea,
// status messages reported by the Emscripten runtime
setStatus: function(text) {
printTextarea('js: ' + text);
},
// intentionally a no-op: run-dependency progress is not surfaced in this demo
monitorRunDependencies: function(left) {
}
};
// Maximum accepted audio length in seconds (longer input is truncated) and
// the sample rate all audio is resampled to before being passed to whisper.
const kMaxAudio_s = 120;
const kSampleRate = 16000;
// prefixed-API shims for Safari / older WebKit browsers
window.AudioContext = window.AudioContext || window.webkitAudioContext;
window.OfflineAudioContext = window.OfflineAudioContext || window.webkitOfflineAudioContext;
@ -43,15 +243,9 @@
// audio data
var audio = null;
// the whisper module instance
var whisper = null;
// the whisper instance
var instance = null;
// instantiate the whisper instance
// whisper_factory comes from the whisper.js module
whisper_factory().then(function(obj) {
whisper = obj;
});
var model_fname = '';
// helper function
function convertTypedArray(src, type) {
@ -60,47 +254,29 @@
return new type(buffer);
}
// initialize whisper
function init() {
if (!instance) {
instance = whisper.init('ggml.bin');
if (instance) {
console.log('whisper instance initialized');
}
}
if (!instance) {
console.log('whisper instance initialization failed');
return;
}
if (instance) {
var ret = whisper.full_default(instance, audio);
if (ret) {
console.log('whisper full_default returned: ' + ret);
}
}
}
function loadFile(event, fname) {
var file = event.target.files[0] || null;
if (file == null) {
return;
}
console.log(
"<p>File information: <strong>" + file.name +
"</strong> type: <strong>" + file.type +
"</strong> size: <strong>" + file.size +
"</strong> bytes</p>"
);
printTextarea("js: loading model: " + file.name + ", size: " + file.size + " bytes");
printTextarea('js: please wait ...');
var reader = new FileReader();
reader.onload = function(event) {
var buf = new Uint8Array(reader.result);
// write to WASM file using whisper.FS_createDataFile
whisper.FS_createDataFile("/", fname, buf, true, true);
// if the file exists, delete it
try {
Module.FS_unlink(fname);
} catch (e) {
}
Module.FS_createDataFile("/", fname, buf, true, true);
model_fname = file.name;
printTextarea('js: loaded model: ' + model_fname + ' size: ' + buf.length);
}
reader.readAsArrayBuffer(file);
}
@ -115,12 +291,8 @@
return;
}
console.log(
"<p>Audio information: <strong>" + file.name +
"</strong> type: <strong>" + file.type +
"</strong> size: <strong>" + file.size +
"</strong> bytes</p>"
);
printTextarea('js: loading audio: ' + file.name + ', size: ' + file.size + ' bytes');
printTextarea('js: please wait ...');
var reader = new FileReader();
reader.onload = function(event) {
@ -135,19 +307,172 @@
offlineContext.startRendering().then(function(renderedBuffer) {
audio = renderedBuffer.getChannelData(0);
//var audio16 = convertTypedArray(data, Int16Array);
printTextarea('js: audio loaded, size: ' + audio.length);
// truncate to first kMaxAudio_s seconds
if (audio.length > kMaxAudio_s*kSampleRate) {
audio = audio.slice(0, kMaxAudio_s*kSampleRate);
printTextarea('js: truncated audio to first ' + kMaxAudio_s + ' seconds');
}
setAudio(audio);
});
}, function(e) {
printTextarea('js: error decoding audio: ' + e);
audio = null;
setAudio(audio);
});
}
reader.readAsArrayBuffer(file);
}
//
// Microphone
//
// active MediaRecorder instance (null until recording starts)
var mediaRecorder = null;
// polled every second by startRecording()'s interval timer
var doRecording = false;
// Date.now() timestamp of when recording started, drives the progress bar
var startTime = 0;
// Request the recording to stop; the actual teardown (MediaRecorder.stop,
// releasing tracks) happens asynchronously in startRecording()'s poller.
function stopRecording() {
doRecording = false;
}
// Record up to kMaxAudio_s seconds of audio from the microphone.
// A 1000 ms interval timer polls doRecording to detect a stop request,
// drives the progress bar, and performs the actual recorder teardown.
function startRecording() {
    // NOTE(review): 'context' is expected to be declared elsewhere in this
    // file (a shared AudioContext) — confirm it is in scope here.
    if (!context) {
        // use the shared kSampleRate constant instead of a hard-coded 16000
        context = new AudioContext({sampleRate: kSampleRate});
    }

    document.getElementById('start').disabled = true;
    document.getElementById('stop').disabled = false;
    document.getElementById('progress-bar').style.width = '0%';
    document.getElementById('progress-text').innerHTML = '0%';

    doRecording = true;
    startTime = Date.now();

    var chunks = [];
    var stream = null;

    navigator.mediaDevices.getUserMedia({audio: true, video: false})
        .then(function(s) {
            stream = s;
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.ondataavailable = function(e) {
                chunks.push(e.data);
            };
            mediaRecorder.onstop = function(e) {
                var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
                chunks = [];

                document.getElementById('start').disabled = false;
                document.getElementById('stop').disabled = true;

                var reader = new FileReader();
                reader.onload = function(event) {
                    var buf = new Uint8Array(reader.result);

                    // decode the compressed recording, then resample it via an
                    // offline context so `audio` ends up as float32 PCM
                    context.decodeAudioData(buf.buffer, function(audioBuffer) {
                        var offlineContext = new OfflineAudioContext(audioBuffer.numberOfChannels, audioBuffer.length, audioBuffer.sampleRate);
                        var source = offlineContext.createBufferSource();
                        source.buffer = audioBuffer;
                        source.connect(offlineContext.destination);
                        source.start(0);

                        offlineContext.startRendering().then(function(renderedBuffer) {
                            audio = renderedBuffer.getChannelData(0);
                            printTextarea('js: audio recorded, size: ' + audio.length);

                            // truncate to first kMaxAudio_s seconds
                            if (audio.length > kMaxAudio_s*kSampleRate) {
                                audio = audio.slice(0, kMaxAudio_s*kSampleRate);
                                printTextarea('js: truncated audio to first ' + kMaxAudio_s + ' seconds');
                            }
                            setAudio(audio);
                        });
                    }, function(e) {
                        printTextarea('js: error decoding audio: ' + e);
                        audio = null;
                        setAudio(audio);
                    });
                }

                reader.readAsArrayBuffer(blob);
            };
            mediaRecorder.start();
        })
        .catch(function(err) {
            printTextarea('js: error getting audio stream: ' + err);
        });

    var interval = setInterval(function() {
        if (!doRecording) {
            clearInterval(interval);
            // guard: getUserMedia may have failed (or not resolved yet),
            // leaving mediaRecorder/stream null — previously this threw
            if (mediaRecorder) {
                mediaRecorder.stop();
            }
            if (stream) {
                stream.getTracks().forEach(function(track) {
                    track.stop();
                });
            }
        }

        document.getElementById('progress-bar').style.width = (100*(Date.now() - startTime)/1000/kMaxAudio_s) + '%';
        document.getElementById('progress-text').innerHTML = (100*(Date.now() - startTime)/1000/kMaxAudio_s).toFixed(0) + '%';
    }, 1000);

    printTextarea('js: recording ...');

    // hard stop after kMaxAudio_s seconds even if the user never clicks Stop
    setTimeout(function() {
        if (doRecording) {
            printTextarea('js: recording stopped after ' + kMaxAudio_s + ' seconds');
            stopRecording();
        }
    }, kMaxAudio_s*1000);
}
//
// Transcribe
//
function onTranscribe() {
init();
// Run whisper on the loaded audio. Lazily initializes the whisper instance
// from the model file written to the WASM FS as 'ggml.bin'.
// translate: false = transcribe in-language, true = translate to English.
function onProcess(translate) {
    if (!instance) {
        instance = Module.init('ggml.bin');
        if (instance) {
            printTextarea("js: whisper initialized, instance: " + instance);
            document.getElementById('model').innerHTML = 'Model loaded: ' + model_fname;
        }
    }

    // guard clauses: bail out if init failed or no audio has been loaded
    if (!instance) {
        printTextarea("js: failed to initialize whisper");
        return;
    }

    if (!audio) {
        printTextarea("js: no audio data");
        return;
    }

    printTextarea('');
    printTextarea('js: processing - this might take a while ...');
    printTextarea('js: the page will be unresponsive until the processing is completed');
    printTextarea('');
    printTextarea('');

    // defer slightly so the messages above render before the UI blocks
    setTimeout(function() {
        var ret = Module.full_default(instance, audio, document.getElementById('language').value, translate);
        console.log('js: full_default returned: ' + ret);
        if (ret) {
            printTextarea("js: whisper returned: " + ret);
        }
    }, 100);
}
</script>
<script type="text/javascript" src="whisper.js"></script>
</body>
</html>