Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-06-25 01:19:10 +00:00)
Compare commits (20 commits):

0a2621b637
1b7a7df793
4af1689ee5
b10d75199e
ea3344eb8f
83c742f1a7
41b48ab7f1
a728be9cdb
46a68fb9b5
ccd56a9c5b
3500ce8727
7519eabf65
b21213c23e
9e700e1821
0bfe728b84
4e5674a5d5
4c66b6a828
c30bffc8a5
8fdfb0ba92
c71363f14c
.gitmodules (vendored, new file, +3)

@ -0,0 +1,3 @@
[submodule "bindings/ios"]
	path = bindings/ios
	url = https://github.com/ggerganov/whisper.spm
CMakeLists.txt

@ -9,6 +9,11 @@ if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(WHISPER_STANDALONE ON)
    include(cmake/GitVars.cmake)
    include(cmake/BuildTypes.cmake)
+
+   # configure project version
+   if (EXISTS "${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl")
+       configure_file(${CMAKE_SOURCE_DIR}/bindings/ios/Makefile-tmpl ${CMAKE_SOURCE_DIR}/bindings/ios/Makefile @ONLY)
+   endif()
else()
    set(WHISPER_STANDALONE OFF)
endif()

@ -43,6 +48,8 @@ option(WHISPER_SUPPORT_SDL2 "whisper: support for libSDL2" OFF)

if (APPLE)
    option(WHISPER_NO_ACCELERATE "whisper: disable Accelerate framework" OFF)
+   option(WHISPER_NO_AVX        "whisper: disable AVX"                  OFF)
+   option(WHISPER_NO_AVX2       "whisper: disable AVX2"                 OFF)
else()
    option(WHISPER_SUPPORT_OPENBLAS "whisper: support for OpenBLAS" OFF)
endif()

@ -138,15 +145,21 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES
else()
    message(STATUS "x86 detected")
    if (MSVC)
        set(CMAKE_C_FLAGS           "${CMAKE_C_FLAGS} /arch:AVX2")
        set(CMAKE_C_FLAGS_RELEASE   "${CMAKE_C_FLAGS_RELEASE} /arch:AVX2")
        set(CMAKE_CXX_FLAGS         "${CMAKE_CXX_FLAGS} /arch:AVX2")
        set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /arch:AVX2")
    else()
        if (EMSCRIPTEN)
            # we require support for WASM SIMD 128-bit
            set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS} -pthread -msimd128")
            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
        else()
-           set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx -mavx2 -mfma -mf16c")
+           if(NOT WHISPER_NO_AVX)
+               set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
+           endif()
+           if(NOT WHISPER_NO_AVX2)
+               set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
+           endif()
+           set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma -mf16c")
        endif()
    endif()
endif()
Makefile (37 changed lines)

@ -50,11 +50,19 @@ endif
# TODO: probably these flags need to be tweaked on some architectures
# feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),x86_64)
-	# AVX 512
-	CFLAGS += -mavx512f -mfma -mf16c
-
-	# AVX 256
-	#CFLAGS += -mavx -mavx2 -mfma -mf16c
+	CFLAGS += -mfma -mf16c
+	ifeq ($(UNAME_S),Darwin)
+		AVX1_M := $(shell sysctl machdep.cpu.features)
+		ifneq (,$(findstring AVX1.0,$(AVX1_M)))
+			CFLAGS += -mavx
+		endif
+		AVX2_M := $(shell sysctl machdep.cpu.leaf7_features)
+		ifneq (,$(findstring AVX2,$(AVX2_M)))
+			CFLAGS += -mavx2
+		endif
+	else
+		CFLAGS += -mavx -mavx2
+	endif
endif
ifeq ($(UNAME_M),amd64)
	CFLAGS += -mavx -mavx2 -mfma -mf16c

@ -81,13 +89,11 @@ ifneq ($(filter armv8%,$(UNAME_M)),)
	CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif

#
-# Build library + main
+# Build library
#

default: main

-main: examples/main/main.cpp ggml.o whisper.o
-	$(CXX) $(CXXFLAGS) examples/main/main.cpp whisper.o ggml.o -o main $(LDFLAGS)
-	./main -h

ggml.o: ggml.c ggml.h
	$(CC) $(CFLAGS) -c ggml.c -o ggml.o

@ -98,8 +104,11 @@ whisper.o: whisper.cpp whisper.h
libwhisper.a: ggml.o whisper.o
	$(AR) rcs libwhisper.a ggml.o whisper.o

+libwhisper.so: ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) -shared -o libwhisper.so ggml.o whisper.o $(LDFLAGS)

clean:
-	rm -f *.o main stream bench libwhisper.a
+	rm -f *.o main stream bench libwhisper.a libwhisper.so

#
# Examples

@ -107,6 +116,10 @@ clean:

CC_SDL=`sdl2-config --cflags --libs`

+main: examples/main/main.cpp ggml.o whisper.o
+	$(CXX) $(CXXFLAGS) examples/main/main.cpp ggml.o whisper.o -o main $(LDFLAGS)
+	./main -h

stream: examples/stream/stream.cpp ggml.o whisper.o
	$(CXX) $(CXXFLAGS) examples/stream/stream.cpp ggml.o whisper.o -o stream $(CC_SDL) $(LDFLAGS)
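Both the CMake and Makefile changes above decide at build time whether to emit AVX/AVX2/FMA instructions (on macOS by grepping `sysctl machdep.cpu.features` for `AVX1.0`). A binary built with these flags simply crashes with an illegal-instruction fault on a CPU that lacks them, so a runtime sanity check can be useful. The sketch below is not part of the repository; it assumes a GCC/Clang x86 toolchain, where `__builtin_cpu_supports` is available:

    // Hypothetical helper (not from whisper.cpp): report whether the CPU
    // actually supports the instruction sets selected at build time.
    #include <cstdio>

    int main() {
    #if defined(__x86_64__) || defined(__i386__)
        std::printf("AVX:  %s\n", __builtin_cpu_supports("avx")  ? "yes" : "no");
        std::printf("AVX2: %s\n", __builtin_cpu_supports("avx2") ? "yes" : "no");
        std::printf("FMA:  %s\n", __builtin_cpu_supports("fma")  ? "yes" : "no");
    #else
        std::printf("non-x86 build\n");
    #endif
        return 0;
    }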
README.md

@ -437,9 +437,12 @@ For more details, see the conversion script [models/convert-pt-to-ggml.py](model

## Bindings

+- [X] Rust: [tazz4843/whisper-rs](https://github.com/tazz4843/whisper-rs)
+- [X] Objective-C / Swift: [ggerganov/whisper.spm](https://github.com/ggerganov/whisper.spm)
- [ ] Python:
- [ ] Java:

## Examples

There are various examples of using the library for different projects in the [examples](examples) folder. Check them out!

## [Frequently asked questions (#126)](https://github.com/ggerganov/whisper.cpp/discussions/126)
bindings/ios (submodule, +1)

Submodule bindings/ios added at 4bda8e9d80

(One file diff suppressed because one or more lines are too long.)
examples/generate-karaoke.sh (new executable file, +49)

@ -0,0 +1,49 @@
#!/bin/bash

executable="./main"
model="base.en"
model_path="models/ggml-$model.bin"

# require sox and ffmpeg to be installed
if ! command -v sox &> /dev/null
then
    echo "sox could not be found"
    exit 1
fi

if ! command -v ffmpeg &> /dev/null
then
    echo "ffmpeg could not be found"
    exit 2
fi

if [ ! -f "$executable" ]; then
    echo "'$executable' does not exist. Please build it first."
    exit 3
fi

if [ ! -f "$model_path" ]; then
    echo "'$model_path' does not exist. Please download it first."
    exit 4
fi

# record some raw audio
sox -d rec.wav

# resample to 16kHz
ffmpeg -y -i ./rec.wav -ar 16000 -ac 1 -c:a pcm_s16le ./rec16.wav > /dev/null 2>&1

# run Whisper
echo "Processing ..."
./main -m models/ggml-base.en.bin rec16.wav -owts > /dev/null 2>&1

# generate Karaoke video
echo "Generating video ..."
source rec16.wav.wts > /dev/null 2>&1

# play the video
echo "Playing ./rec16.wav.mp4 ..."
ffplay -loglevel 0 -autoexit ./rec16.wav.mp4

echo "Done"
exit 0
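The script records from the default microphone with `sox -d`, resamples to the 16 kHz mono PCM that Whisper expects, and runs `./main` with `-owts`. Judging from the final steps, `-owts` writes a shell script (`rec16.wav.wts`) that, when sourced, renders the karaoke video `rec16.wav.mp4`. The distinct exit codes 1-4 make it easy to tell which prerequisite is missing: sox, ffmpeg, the built binary, or the downloaded model.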
examples/main/main.cpp

@ -53,11 +53,13 @@ struct whisper_params {
    int32_t n_processors = 1;
    int32_t offset_t_ms  = 0;
    int32_t offset_n     = 0;
    int32_t duration_ms  = 0;
    int32_t max_context  = -1;
    int32_t max_len      = 0;

    float word_thold = 0.01f;

    bool speed_up   = false;
    bool verbose    = false;
    bool translate  = false;
    bool output_txt = false;

@ -95,12 +97,16 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
        params.offset_t_ms = std::stoi(argv[++i]);
    } else if (arg == "-on" || arg == "--offset-n") {
        params.offset_n = std::stoi(argv[++i]);
    } else if (arg == "-d" || arg == "--duration") {
        params.duration_ms = std::stoi(argv[++i]);
    } else if (arg == "-mc" || arg == "--max-context") {
        params.max_context = std::stoi(argv[++i]);
    } else if (arg == "-ml" || arg == "--max-len") {
        params.max_len = std::stoi(argv[++i]);
    } else if (arg == "-wt" || arg == "--word-thold") {
        params.word_thold = std::stof(argv[++i]);
    } else if (arg == "-su" || arg == "--speed-up") {
        params.speed_up = true;
    } else if (arg == "-v" || arg == "--verbose") {
        params.verbose = true;
    } else if (arg == "--translate") {

@ -154,9 +160,11 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
    fprintf(stderr, "  -p N,   --processors N  number of processors to use during computation (default: %d)\n", params.n_processors);
    fprintf(stderr, "  -ot N,  --offset-t N    time offset in milliseconds (default: %d)\n", params.offset_t_ms);
    fprintf(stderr, "  -on N,  --offset-n N    segment index offset (default: %d)\n", params.offset_n);
    fprintf(stderr, "  -d N,   --duration N    duration of audio to process in milliseconds (default: %d)\n", params.duration_ms);
    fprintf(stderr, "  -mc N,  --max-context N maximum number of text context tokens to store (default: max)\n");
    fprintf(stderr, "  -ml N,  --max-len N     maximum segment length in characters (default: %d)\n", params.max_len);
    fprintf(stderr, "  -wt N,  --word-thold N  word timestamp probability threshold (default: %f)\n", params.word_thold);
    fprintf(stderr, "  -su,    --speed-up      speed up audio by factor of 2 (faster processing, reduced accuracy, default: %s)\n", params.speed_up ? "true" : "false");
    fprintf(stderr, "  -v,     --verbose       verbose output\n");
    fprintf(stderr, "          --translate     translate from source language to english\n");
    fprintf(stderr, "  -otxt,  --output-txt    output result in a text file\n");

@ -450,9 +458,30 @@ int main(int argc, char ** argv) {
    std::vector<float> pcmf32;
    {
        drwav wav;
-       if (!drwav_init_file(&wav, fname_inp.c_str(), NULL)) {
-           fprintf(stderr, "%s: failed to open WAV file '%s' - check your input\n", argv[0], fname_inp.c_str());
-           whisper_print_usage(argc, argv, {});
+       if (fname_inp == "-") {
+           std::vector<uint8_t> wav_data;
+           {
+               uint8_t buf[1024];
+               while (true)
+               {
+                   const size_t n = fread(buf, 1, sizeof(buf), stdin);
+                   if (n == 0)
+                   {
+                       break;
+                   }
+                   wav_data.insert(wav_data.end(), buf, buf + n);
+               }
+           }
+
+           if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), NULL) == false)
+           {
+               fprintf(stderr, "error: failed to open WAV file from stdin\n");
+               return 4;
+           }
+       }
+       else if (drwav_init_file(&wav, fname_inp.c_str(), NULL) == false) {
+           fprintf(stderr, "error: failed to open '%s' as WAV file\n", fname_inp.c_str());
+           return 4;
+       }

@ -532,11 +561,14 @@ int main(int argc, char ** argv) {
    wparams.n_threads      = params.n_threads;
    wparams.n_max_text_ctx = params.max_context >= 0 ? params.max_context : wparams.n_max_text_ctx;
    wparams.offset_ms      = params.offset_t_ms;
    wparams.duration_ms    = params.duration_ms;

    wparams.token_timestamps = params.output_wts || params.max_len > 0;
    wparams.thold_pt         = params.word_thold;
    wparams.max_len          = params.output_wts && params.max_len == 0 ? 60 : params.max_len;

    wparams.speed_up = params.speed_up;

    // this callback is called on each new segment
    if (!wparams.print_realtime) {
        wparams.new_segment_callback = whisper_print_segment_callback;
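With the `-` input path in place, the transcriber composes with other tools over a pipe. As a hedged illustration (not a command from this change set), something like `ffmpeg -i input.mp3 -ar 16000 -ac 1 -f wav - 2>/dev/null | ./main -m models/ggml-base.en.bin -` would decode any ffmpeg-readable file straight into `main`, with `drwav_init_memory` parsing the WAV bytes buffered from stdin.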
examples/stream/stream.cpp

@ -40,7 +40,10 @@ struct whisper_params {
    int32_t step_ms    = 3000;
    int32_t length_ms  = 10000;
    int32_t capture_id = -1;
+   int32_t max_tokens = 32;
+   int32_t audio_ctx  = 0;

+   bool speed_up   = false;
    bool verbose    = false;
    bool translate  = false;
    bool no_context = true;

@ -68,6 +71,12 @@ bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
        params.length_ms = std::stoi(argv[++i]);
    } else if (arg == "-c" || arg == "--capture") {
        params.capture_id = std::stoi(argv[++i]);
+   } else if (arg == "-mt" || arg == "--max_tokens") {
+       params.max_tokens = std::stoi(argv[++i]);
+   } else if (arg == "-ac" || arg == "--audio_ctx") {
+       params.audio_ctx = std::stoi(argv[++i]);
+   } else if (arg == "-su" || arg == "--speed-up") {
+       params.speed_up = true;
    } else if (arg == "-v" || arg == "--verbose") {
        params.verbose = true;
    } else if (arg == "--translate") {

@ -113,6 +122,9 @@ void whisper_print_usage(int argc, char ** argv, const whisper_params & params)
    fprintf(stderr, "  --step N               audio step size in milliseconds (default: %d)\n", params.step_ms);
    fprintf(stderr, "  --length N             audio length in milliseconds (default: %d)\n", params.length_ms);
    fprintf(stderr, "  -c ID, --capture ID    capture device ID (default: -1)\n");
+   fprintf(stderr, "  -mt N, --max_tokens N  maximum number of tokens per audio chunk (default: %d)\n", params.max_tokens);
+   fprintf(stderr, "  -ac N, --audio_ctx N   audio context size (default: %d, 0 - all)\n", params.audio_ctx);
+   fprintf(stderr, "  -su,   --speed-up      speed up audio by factor of 2 (faster processing, reduced accuracy, default: %s)\n", params.speed_up ? "true" : "false");
    fprintf(stderr, "  -v,    --verbose       verbose output\n");
    fprintf(stderr, "  --translate            translate from source language to english\n");
    fprintf(stderr, "  -kc,   --keep-context  keep text context from earlier audio (default: false)\n");

@ -217,6 +229,7 @@ int main(int argc, char ** argv) {
    const int n_samples      = (params.step_ms/1000.0)*WHISPER_SAMPLE_RATE;
    const int n_samples_len  = (params.length_ms/1000.0)*WHISPER_SAMPLE_RATE;
    const int n_samples_30s  = 30*WHISPER_SAMPLE_RATE;
+   const int n_samples_keep = 0.2*WHISPER_SAMPLE_RATE;

    std::vector<float> pcmf32(n_samples_30s, 0.0f);
    std::vector<float> pcmf32_old;

@ -299,7 +312,7 @@ int main(int argc, char ** argv) {
    //const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_30s/30 - n_samples_new));

    // take up to params.length_ms audio from previous iteration
-   const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_len - n_samples_new));
+   const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_keep + n_samples_len - n_samples_new));

    //printf("processing: take = %d, new = %d, old = %d\n", n_samples_take, n_samples_new, (int) pcmf32_old.size());

@ -323,9 +336,14 @@ int main(int argc, char ** argv) {
    wparams.print_timestamps = !params.no_timestamps;
    wparams.translate        = params.translate;
    wparams.no_context       = params.no_context;
+   wparams.single_segment   = true;
+   wparams.max_tokens       = params.max_tokens;
    wparams.language         = params.language.c_str();
    wparams.n_threads        = params.n_threads;

+   wparams.audio_ctx = params.audio_ctx;
+   wparams.speed_up  = params.speed_up;

    if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
        fprintf(stderr, "%s: failed to process audio\n", argv[0]);
        return 6;

@ -373,7 +391,8 @@ int main(int argc, char ** argv) {
    if ((n_iter % n_new_line) == 0) {
        printf("\n");

-       pcmf32_old.clear();
+       // keep part of the audio for next iteration to try to mitigate word boundary issues
+       pcmf32_old = std::vector<float>(pcmf32.end() - n_samples_keep, pcmf32.end());
    }
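To put numbers on the sliding window: with WHISPER_SAMPLE_RATE at 16000 Hz, the default 3000 ms step is 48,000 samples, the 10,000 ms length is 160,000 samples, and the new 0.2 s n_samples_keep overlap is 3,200 samples carried into the next iteration, so a word straddling a window boundary is seen twice rather than cut in half.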
ggml.c (250 changed lines)

@ -37,8 +37,14 @@ typedef HANDLE pthread_t;

typedef DWORD thread_ret_t;
static int pthread_create(pthread_t* out, void* unused, thread_ret_t(*func)(void*), void* arg) {
-   out = CreateThread(NULL, 0, func, arg, 0, NULL);
-   return out != NULL;
+   HANDLE handle = CreateThread(NULL, 0, func, arg, 0, NULL);
+   if (handle == NULL)
+   {
+       return EAGAIN;
+   }
+
+   *out = handle;
+   return 0;
}

static int pthread_join(pthread_t thread, void* unused) {
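The old Windows shim had two bugs: it assigned the new HANDLE to its local copy of the `out` parameter (so the caller's `pthread_t` was never set), and it returned nonzero on success, whereas the pthread convention is 0 on success and an error code such as EAGAIN on failure. The replacement writes through `*out` and follows the pthread return convention.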
@ -327,45 +333,6 @@ inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float
    for (int i = n16; i < n; ++i) {
        sumf += x[i]*y[i];
    }
-#elif defined(__AVX512F__)
-   const int n64 = (n & ~63);
-
-   __m512 sum0 = _mm512_setzero_ps();
-   __m512 sum1 = _mm512_setzero_ps();
-   __m512 sum2 = _mm512_setzero_ps();
-   __m512 sum3 = _mm512_setzero_ps();
-
-   __m512 x0, x1, x2, x3;
-   __m512 y0, y1, y2, y3;
-
-   for (int i = 0; i < n64; i += 64) {
-       x0 = _mm512_loadu_ps(x + i + 0);
-       x1 = _mm512_loadu_ps(x + i + 16);
-       x2 = _mm512_loadu_ps(x + i + 32);
-       x3 = _mm512_loadu_ps(x + i + 48);
-
-       y0 = _mm512_loadu_ps(y + i + 0);
-       y1 = _mm512_loadu_ps(y + i + 16);
-       y2 = _mm512_loadu_ps(y + i + 32);
-       y3 = _mm512_loadu_ps(y + i + 48);
-
-       sum0 = _mm512_fmadd_ps(x0, y0, sum0);
-       sum1 = _mm512_fmadd_ps(x1, y1, sum1);
-       sum2 = _mm512_fmadd_ps(x2, y2, sum2);
-       sum3 = _mm512_fmadd_ps(x3, y3, sum3);
-   }
-
-   sum0 = _mm512_add_ps(sum0, sum1);
-   sum2 = _mm512_add_ps(sum2, sum3);
-   sum0 = _mm512_add_ps(sum0, sum2);
-
-   sumf = sum0[0] + sum0[1] + sum0[2]  + sum0[3]  + sum0[4]  + sum0[5]  + sum0[6]  + sum0[7] +
-          sum0[8] + sum0[9] + sum0[10] + sum0[11] + sum0[12] + sum0[13] + sum0[14] + sum0[15];
-
-   // leftovers
-   for (int i = n64; i < n; ++i) {
-       sumf += x[i]*y[i];
-   }
#elif defined(__AVX2__)
    // AVX 256-bit
    const int n32 = (n & ~31);
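The removed block follows the same pattern every SIMD path in ggml uses: round `n` down to a multiple of the per-iteration batch with a mask (`n & ~63` for 64 floats), accumulate into several independent registers to hide FMA latency, do a horizontal reduction, and finish the remaining elements in a scalar loop. A scalar sketch of that structure (illustrative only, not code from ggml.c):

    // Illustrative restructuring of ggml's dot-product pattern: process
    // BATCH elements per iteration with four independent accumulators,
    // then finish the tail ("leftovers") with a plain loop.
    float dot(const float* x, const float* y, int n) {
        constexpr int BATCH = 64;            // elements per unrolled iteration
        const int n64 = n & ~(BATCH - 1);    // round n down to a multiple of 64

        float sum0 = 0, sum1 = 0, sum2 = 0, sum3 = 0; // independent accumulators
        for (int i = 0; i < n64; i += BATCH) {
            for (int j = 0; j < 16; ++j) {   // each accumulator covers 16 lanes
                sum0 += x[i + j +  0] * y[i + j +  0];
                sum1 += x[i + j + 16] * y[i + j + 16];
                sum2 += x[i + j + 32] * y[i + j + 32];
                sum3 += x[i + j + 48] * y[i + j + 48];
            }
        }

        float sumf = (sum0 + sum1) + (sum2 + sum3); // horizontal reduction

        for (int i = n64; i < n; ++i) {      // leftovers: the final n - n64 elements
            sumf += x[i] * y[i];
        }
        return sumf;
    }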
@ -563,47 +530,6 @@ inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t
    for (int i = n32; i < n; ++i) {
        sumf += ggml_fp16_to_fp32(x[i])*ggml_fp16_to_fp32(y[i]);
    }
-#elif defined(__AVX512F__)
-   // AVX 512-bit
-   const int n64 = (n & ~63);
-
-   __m512 sum0 = _mm512_setzero_ps();
-   __m512 sum1 = _mm512_setzero_ps();
-   __m512 sum2 = _mm512_setzero_ps();
-   __m512 sum3 = _mm512_setzero_ps();
-
-   __m512 x0, x1, x2, x3;
-   __m512 y0, y1, y2, y3;
-
-   for (int i = 0; i < n64; i += 64) {
-       x0 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 0 )));
-       x1 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 16)));
-       x2 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 32)));
-       x3 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 48)));
-
-       y0 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 0 )));
-       y1 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 16)));
-       y2 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 32)));
-       y3 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 48)));
-
-       sum0 = _mm512_fmadd_ps(x0, y0, sum0);
-       sum1 = _mm512_fmadd_ps(x1, y1, sum1);
-       sum2 = _mm512_fmadd_ps(x2, y2, sum2);
-       sum3 = _mm512_fmadd_ps(x3, y3, sum3);
-   }
-
-   const __m512 sum01 = _mm512_add_ps(sum0, sum1);
-   const __m512 sum23 = _mm512_add_ps(sum2, sum3);
-   const __m512 sum0123 = _mm512_add_ps(sum01, sum23);
-
-   sumf = sum0123[0] + sum0123[1] + sum0123[2]  + sum0123[3]  + sum0123[4]  + sum0123[5]  + sum0123[6]  + sum0123[7] +
-          sum0123[8] + sum0123[9] + sum0123[10] + sum0123[11] + sum0123[12] + sum0123[13] + sum0123[14] + sum0123[15];
-
-   // leftovers
-   for (int i = n64; i < n; ++i) {
-       //GGML_ASSERT(false);
-       sumf += ggml_fp16_to_fp32(x[i])*ggml_fp16_to_fp32(y[i]);
-   }
#elif defined(__AVX2__)
    // AVX 256-bit
    const int n32 = (n & ~31);
@ -710,7 +636,7 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float
    // NEON 128-bit
    const int n16 = (n & ~15);

-   const float32x4_t v0 = vdupq_n_f32(v);
+   const float32x4_t v4 = vdupq_n_f32(v);

    float32x4_t x0, x1, x2, x3;
    float32x4_t y0, y1, y2, y3;

@ -726,14 +652,14 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float
    y2 = vld1q_f32(y + i + 8);
    y3 = vld1q_f32(y + i + 12);

-   y0 = vfmaq_f32(y0, x0, v0);
-   y1 = vfmaq_f32(y1, x1, v0);
-   y2 = vfmaq_f32(y2, x2, v0);
-   y3 = vfmaq_f32(y3, x3, v0);
+   y0 = vfmaq_f32(y0, x0, v4);
+   y1 = vfmaq_f32(y1, x1, v4);
+   y2 = vfmaq_f32(y2, x2, v4);
+   y3 = vfmaq_f32(y3, x3, v4);

    vst1q_f32(y + i + 0,  y0);
    vst1q_f32(y + i + 4,  y1);
    vst1q_f32(y + i + 8,  y2);
    vst1q_f32(y + i + 12, y3);
}
@ -741,46 +667,11 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float
    for (int i = n16; i < n; ++i) {
        y[i] += x[i]*v;
    }
-#elif defined(__AVX512F__)
-   // AVX512 512-bit
-   const int n64 = (n & ~63);
-
-   const __m512 v0 = _mm512_set1_ps(v);
-
-   __m512 x0, x1, x2, x3;
-   __m512 y0, y1, y2, y3;
-
-   for (int i = 0; i < n64; i += 64) {
-       x0 = _mm512_loadu_ps(x + i + 0);
-       x1 = _mm512_loadu_ps(x + i + 16);
-       x2 = _mm512_loadu_ps(x + i + 32);
-       x3 = _mm512_loadu_ps(x + i + 48);
-
-       y0 = _mm512_loadu_ps(y + i + 0);
-       y1 = _mm512_loadu_ps(y + i + 16);
-       y2 = _mm512_loadu_ps(y + i + 32);
-       y3 = _mm512_loadu_ps(y + i + 48);
-
-       y0 = _mm512_fmadd_ps(x0, v0, y0);
-       y1 = _mm512_fmadd_ps(x1, v0, y1);
-       y2 = _mm512_fmadd_ps(x2, v0, y2);
-       y3 = _mm512_fmadd_ps(x3, v0, y3);
-
-       _mm512_storeu_ps(y + i + 0,  y0);
-       _mm512_storeu_ps(y + i + 16, y1);
-       _mm512_storeu_ps(y + i + 32, y2);
-       _mm512_storeu_ps(y + i + 48, y3);
-   }
-
-   // leftovers
-   for (int i = n64; i < n; ++i) {
-       y[i] += x[i]*v;
-   }
#elif defined(__AVX2__)
    // AVX 256-bit
    const int n32 = (n & ~31);

-   const __m256 v0 = _mm256_set1_ps(v);
+   const __m256 v4 = _mm256_set1_ps(v);

    __m256 x0, x1, x2, x3;
    __m256 y0, y1, y2, y3;
@ -796,13 +687,13 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float
    y2 = _mm256_loadu_ps(y + i + 16);
    y3 = _mm256_loadu_ps(y + i + 24);

-   y0 = _mm256_fmadd_ps(x0, v0, y0);
-   y1 = _mm256_fmadd_ps(x1, v0, y1);
-   y2 = _mm256_fmadd_ps(x2, v0, y2);
-   y3 = _mm256_fmadd_ps(x3, v0, y3);
+   y0 = _mm256_fmadd_ps(x0, v4, y0);
+   y1 = _mm256_fmadd_ps(x1, v4, y1);
+   y2 = _mm256_fmadd_ps(x2, v4, y2);
+   y3 = _mm256_fmadd_ps(x3, v4, y3);

    _mm256_storeu_ps(y + i + 0,  y0);
    _mm256_storeu_ps(y + i + 8,  y1);
    _mm256_storeu_ps(y + i + 16, y2);
    _mm256_storeu_ps(y + i + 24, y3);
}
@ -815,7 +706,7 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float
    // WASM SIMD 128-bit
    const int n16 = (n & ~15);

-   const v128_t v0 = wasm_f32x4_splat(v);
+   const v128_t v4 = wasm_f32x4_splat(v);

    v128_t x0, x1, x2, x3;
    v128_t y0, y1, y2, y3;

@ -831,10 +722,10 @@ inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float
    y2 = wasm_v128_load(y + i + 8);
    y3 = wasm_v128_load(y + i + 12);

-   y0 = wasm_f32x4_add(y0, wasm_f32x4_mul(x0, v0));
-   y1 = wasm_f32x4_add(y1, wasm_f32x4_mul(x1, v0));
-   y2 = wasm_f32x4_add(y2, wasm_f32x4_mul(x2, v0));
-   y3 = wasm_f32x4_add(y3, wasm_f32x4_mul(x3, v0));
+   y0 = wasm_f32x4_add(y0, wasm_f32x4_mul(x0, v4));
+   y1 = wasm_f32x4_add(y1, wasm_f32x4_mul(x1, v4));
+   y2 = wasm_f32x4_add(y2, wasm_f32x4_mul(x2, v4));
+   y3 = wasm_f32x4_add(y3, wasm_f32x4_mul(x3, v4));

    wasm_v128_store(y + i + 0, y0);
    wasm_v128_store(y + i + 4, y1);
@ -860,7 +751,7 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    const int n32 = (n & ~31);

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-   const float16x8_t v0 = vdupq_n_f16(v);
+   const float16x8_t v8 = vdupq_n_f16(v);

    float16x8_t x0, x1, x2, x3;
    float16x8_t y0, y1, y2, y3;

@ -876,10 +767,10 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    x2 = vld1q_f16(x + i + 16);
    x3 = vld1q_f16(x + i + 24);

-   y0 = vfmaq_f16(y0, x0, v0);
-   y1 = vfmaq_f16(y1, x1, v0);
-   y2 = vfmaq_f16(y2, x2, v0);
-   y3 = vfmaq_f16(y3, x3, v0);
+   y0 = vfmaq_f16(y0, x0, v8);
+   y1 = vfmaq_f16(y1, x1, v8);
+   y2 = vfmaq_f16(y2, x2, v8);
+   y3 = vfmaq_f16(y3, x3, v8);

    vst1q_f16(y + i + 0 , y0);
    vst1q_f16(y + i + 8 , y1);

@ -887,7 +778,8 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    vst1q_f16(y + i + 24, y3);
}
#else
-   const float32x4_t v0 = vdupq_n_f32(v);
+   const float32x4_t v40 = vdupq_n_f32(v);
+   const float32x4_t v41 = vdupq_n_f32(v);

    float32x4_t x0, x1, x2, x3, x4, x5, x6, x7;
    float32x4_t y0, y1, y2, y3, y4, y5, y6, y7;

@ -911,14 +803,14 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    x6 = vcvt_f32_f16(vld1_f16(x + i + 24));
    x7 = vcvt_f32_f16(vld1_f16(x + i + 28));

-   y0 = vfmaq_f32(y0, x0, v0);
-   y1 = vfmaq_f32(y1, x1, v0);
-   y2 = vfmaq_f32(y2, x2, v0);
-   y3 = vfmaq_f32(y3, x3, v0);
-   y4 = vfmaq_f32(y4, x4, v0);
-   y5 = vfmaq_f32(y5, x5, v0);
-   y6 = vfmaq_f32(y6, x6, v0);
-   y7 = vfmaq_f32(y7, x7, v0);
+   y0 = vfmaq_f32(y0, x0, v40);
+   y1 = vfmaq_f32(y1, x1, v40);
+   y2 = vfmaq_f32(y2, x2, v40);
+   y3 = vfmaq_f32(y3, x3, v40);
+   y4 = vfmaq_f32(y4, x4, v41);
+   y5 = vfmaq_f32(y5, x5, v41);
+   y6 = vfmaq_f32(y6, x6, v41);
+   y7 = vfmaq_f32(y7, x7, v41);

    vst1_f16(y + i + 0 , vcvt_f16_f32(y0));
    vst1_f16(y + i + 4 , vcvt_f16_f32(y1));

@ -936,47 +828,11 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
        GGML_ASSERT(false);
        y[i] = ggml_fp32_to_fp16(ggml_fp16_to_fp32(y[i]) + ggml_fp16_to_fp32(x[i])*v);
    }
-#elif defined(__AVX512F__)
-   // AVX 512-bit
-   const int n64 = (n & ~63);
-
-   const __m512 v0 = _mm512_set1_ps(v);
-
-   __m512 x0, x1, x2, x3;
-   __m512 y0, y1, y2, y3;
-
-   for (int i = 0; i < n64; i += 64) {
-       x0 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 0 )));
-       x1 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 16)));
-       x2 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 32)));
-       x3 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(x + i + 48)));
-
-       y0 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 0 )));
-       y1 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 16)));
-       y2 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 32)));
-       y3 = _mm512_cvtph_ps(_mm256_loadu_si256((__m256i*)(y + i + 48)));
-
-       y0 = _mm512_fmadd_ps(x0, v0, y0);
-       y1 = _mm512_fmadd_ps(x1, v0, y1);
-       y2 = _mm512_fmadd_ps(x2, v0, y2);
-       y3 = _mm512_fmadd_ps(x3, v0, y3);
-
-       _mm256_storeu_si256((__m256i*)(y + i + 0 ), _mm512_cvtps_ph(y0, 0));
-       _mm256_storeu_si256((__m256i*)(y + i + 16), _mm512_cvtps_ph(y1, 0));
-       _mm256_storeu_si256((__m256i*)(y + i + 32), _mm512_cvtps_ph(y2, 0));
-       _mm256_storeu_si256((__m256i*)(y + i + 48), _mm512_cvtps_ph(y3, 0));
-   }
-
-   // leftovers
-   for (int i = n64; i < n; ++i) {
-       GGML_ASSERT(false);
-       y[i] = ggml_fp32_to_fp16(ggml_fp16_to_fp32(y[i]) + ggml_fp16_to_fp32(x[i])*v);
-   }
#elif defined(__AVX2__)
    // AVX 256-bit
    const int n32 = (n & ~31);

-   const __m256 v0 = _mm256_set1_ps(v);
+   const __m256 v8 = _mm256_set1_ps(v);

    __m256 x0, x1, x2, x3;
    __m256 y0, y1, y2, y3;

@ -992,10 +848,10 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    x2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 16)));
    x3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(x + i + 24)));

-   y0 = _mm256_fmadd_ps(x0, v0, y0);
-   y1 = _mm256_fmadd_ps(x1, v0, y1);
-   y2 = _mm256_fmadd_ps(x2, v0, y2);
-   y3 = _mm256_fmadd_ps(x3, v0, y3);
+   y0 = _mm256_fmadd_ps(x0, v8, y0);
+   y1 = _mm256_fmadd_ps(x1, v8, y1);
+   y2 = _mm256_fmadd_ps(x2, v8, y2);
+   y3 = _mm256_fmadd_ps(x3, v8, y3);

    _mm_storeu_si128((__m128i*)(y + i + 0 ), _mm256_cvtps_ph(y0, 0));
    _mm_storeu_si128((__m128i*)(y + i + 8 ), _mm256_cvtps_ph(y1, 0));

@ -1012,7 +868,7 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    // WASM SIMD 128-bit
    const int n16 = (n & ~15);

-   const v128_t v0 = wasm_f32x4_splat(v);
+   const v128_t v4 = wasm_f32x4_splat(v);

    v128_t x0, x1, x2, x3;
    v128_t y0, y1, y2, y3;

@ -1036,10 +892,10 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_
    y2 = wasm_v128_load(ty + 8);
    y3 = wasm_v128_load(ty + 12);

-   y0 = wasm_f32x4_add(y0, wasm_f32x4_mul(x0, v0));
-   y1 = wasm_f32x4_add(y1, wasm_f32x4_mul(x1, v0));
-   y2 = wasm_f32x4_add(y2, wasm_f32x4_mul(x2, v0));
-   y3 = wasm_f32x4_add(y3, wasm_f32x4_mul(x3, v0));
+   y0 = wasm_f32x4_add(y0, wasm_f32x4_mul(x0, v4));
+   y1 = wasm_f32x4_add(y1, wasm_f32x4_mul(x1, v4));
+   y2 = wasm_f32x4_add(y2, wasm_f32x4_mul(x2, v4));
+   y3 = wasm_f32x4_add(y3, wasm_f32x4_mul(x3, v4));

    wasm_v128_store(ty + 0, y0);
    wasm_v128_store(ty + 4, y1);
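The register renames in ggml_vec_mad_f32/f16 (v0 to v4, v8, v40/v41) carry no functional change; they appear to simply distinguish the broadcast-value registers from the x0/y0 data registers. Note also the GGML_ASSERT(false) in the fp16 leftover loops: the fp16 vec_mad paths effectively assert that n is already a multiple of the batch size rather than handling a genuine tail.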
ggml.h (175 changed lines)

@ -1,5 +1,174 @@
#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you wish some specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph gf = ggml_build_forward(f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute(ctx0, &gf);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory
// and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was
// actually needed.
//
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach allows to define the function graph once and then compute its forward or backward graphs
// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way
// the user can avoid the memory allocation overhead at runtime.
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows
// to store tensors that are not contiguous in memory, which is useful for operations such as transposition and
// permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3);
//
//       // a[1, 2] = 1.0f;
//       *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f;
//
//       // a[2, 0] = 2.0f;
//       *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f;
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used.
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
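The two documentation fragments above assemble into one small program. Here they are combined into a self-contained sketch; the only liberty taken is using `ctx` consistently where the comment block writes `ctx0`, and the comment-style initializers follow whisper.cpp's own convention:

    // Combined from the documentation fragments above: f(x) = a*x^2 + b at x = 2.
    #include <cstdio>
    #include "ggml.h"

    int main() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
        };

        struct ggml_context * ctx = ggml_init(params); // memory allocation happens here

        struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        ggml_set_param(ctx, x); // x is an input variable

        struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
        struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
        struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);

        struct ggml_cgraph gf = ggml_build_forward(f);

        ggml_set_f32(x, 2.0f);
        ggml_set_f32(a, 3.0f);
        ggml_set_f32(b, 4.0f);

        ggml_graph_compute(ctx, &gf);

        std::printf("f = %f\n", ggml_get_f32_1d(f, 0)); // expect 3*4 + 4 = 16
        return 0;
    }

For the stride example in the comment: a 2x3 FP32 tensor has nb[0] = 4 bytes and nb[1] = 2*4 = 8 bytes, so the element a[1, 2] written above lives at byte offset 2*8 + 1*4 = 20.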
#ifdef __cplusplus
extern "C" {
#endif

@ -21,7 +190,8 @@ typedef __fp16 ggml_fp16_t;
typedef uint16_t ggml_fp16_t;
#endif

-float ggml_fp16_to_fp32(ggml_fp16_t x);
+// convert FP16 <-> FP32
+float       ggml_fp16_to_fp32(ggml_fp16_t x);
ggml_fp16_t ggml_fp32_to_fp16(float x);

struct ggml_object;

@ -36,6 +206,7 @@ enum ggml_type {
    GGML_TYPE_COUNT,
};

+// available tensor operations:
enum ggml_op {
    GGML_OP_NONE = 0,

@ -136,7 +307,7 @@ struct ggml_init_params {
    void * mem_buffer; // if NULL, memory will be allocated internally
};

-void ggml_time_init(void);
+void ggml_time_init(void); // call this once at the beginning of the program
int64_t ggml_time_ms(void);
int64_t ggml_time_us(void);
int64_t ggml_cycles(void);
models/convert-pt-to-ggml.py

@ -297,8 +297,6 @@ for name in list_vars.keys():
       name == "encoder.conv2.bias" or \
       name == "encoder.positional_embedding" or \
       name == "decoder.positional_embedding":
-      ftype = 0
-      data = data.astype(np.float32)
-      print("  Converting to float32")
+      data = data.astype(np.float32)
+      ftype = 0
whisper.cpp (149 changed lines)

@ -424,6 +424,9 @@ struct whisper_context {
    int64_t t_last;
    whisper_token tid_last;
    std::vector<float> energy; // PCM signal energy

+   // [EXPERIMENTAL] speed-up techniques
+   int32_t exp_n_audio_ctx; // 0 - use default
};

// load the model from a ggml file

@ -613,7 +616,7 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx
    const int n_audio_state = hparams.n_audio_state;
    const int n_audio_layer = hparams.n_audio_layer;

-   const int n_text_ctx = hparams.n_text_ctx;
+   const int n_text_ctx   = hparams.n_text_ctx;
    const int n_text_state = hparams.n_text_state;
    const int n_text_layer = hparams.n_text_layer;

@ -748,7 +751,7 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx
    const int n_audio_state = hparams.n_audio_state;
    const int n_audio_layer = hparams.n_audio_layer;

-   const int n_text_ctx = hparams.n_text_ctx;
+   const int n_text_ctx   = hparams.n_text_ctx;
    const int n_text_state = hparams.n_text_state;
    const int n_text_layer = hparams.n_text_layer;
@ -967,7 +970,7 @@ static bool whisper_model_load(const std::string & fname, whisper_context & wctx

    // key/value memory for the cross-attention layer
    {
-       const int n_audio_ctx = hparams.n_audio_ctx;
+       const int n_audio_ctx   = hparams.n_audio_ctx;

        const int n_mem      = n_text_layer*n_audio_ctx;
        const int n_elements = n_text_state*n_mem;

@ -1076,13 +1079,11 @@ static bool whisper_encode(
    const auto & mel_inp = wctx.mel;
    const auto & hparams = model.hparams;

-   const int n_ctx   = hparams.n_audio_ctx;
+   const int n_ctx   = wctx.exp_n_audio_ctx > 0 ? wctx.exp_n_audio_ctx : hparams.n_audio_ctx;
    const int n_state = hparams.n_audio_state;
    const int n_head  = hparams.n_audio_head;
    const int n_layer = hparams.n_audio_layer;

-   const int N = n_ctx;
-
    const int n_mels = hparams.n_mels;
    assert(mel_inp.n_mel == n_mels);
@ -1132,7 +1133,30 @@ static bool whisper_encode(
        cur = ggml_gelu(ctx0, cur);
    }

-   cur = ggml_add(ctx0, model.e_pe, ggml_transpose(ctx0, cur));
+   // ===================================================================
+   // NOTE: experimenting with partial evaluation of the encoder (ignore)
+   //static int iter = -1;
+   //const int n_iter = 1500/n_ctx;
+
+   //iter = (iter + 1) % n_iter;
+
+   //if (iter == 0) {
+   //    memset(model.memory_cross_k->data, 0, ggml_nbytes(model.memory_cross_k));
+   //    memset(model.memory_cross_v->data, 0, ggml_nbytes(model.memory_cross_v));
+   //}
+
+   static int iter = 0;
+
+   const size_t e_pe_stride = model.e_pe->ne[0]*ggml_element_size(model.e_pe);
+   const size_t e_pe_offset = model.e_pe->ne[0]*ggml_element_size(model.e_pe)*n_ctx*iter;
+
+   struct ggml_tensor * e_pe = ggml_view_2d(ctx0, model.e_pe, model.e_pe->ne[0], n_ctx, e_pe_stride, e_pe_offset);
+
+   cur = ggml_add(ctx0, e_pe, ggml_transpose(ctx0, cur));
+   // ===================================================================
+
+   // original:
+   //cur = ggml_add(ctx0, model.e_pe, ggml_transpose(ctx0, cur));

    struct ggml_tensor * inpL = cur;
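When audio_ctx shrinks n_ctx below the encoder's native 1500 frames (the commented-out n_iter = 1500/n_ctx confirms that figure), the positional embedding can no longer be added whole. The ggml_view_2d above selects an n_ctx-row window of model.e_pe at byte offset ne[0] x element_size x n_ctx x iter, i.e. rows [iter*n_ctx, (iter+1)*n_ctx); with iter fixed at 0 here, only the first n_ctx positional rows are used.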
@ -1198,14 +1222,14 @@ static bool whisper_encode(
    ggml_permute(ctxL,
        ggml_cpy(ctxL,
            Qcur,
-           ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, N)),
+           ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)),
        0, 2, 1, 3);

struct ggml_tensor * K =
    ggml_permute(ctxL,
        ggml_cpy(ctxL,
            Kcur,
-           ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, N)),
+           ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)),
        0, 2, 1, 3);

struct ggml_tensor * V =

@ -1213,9 +1237,9 @@ static bool whisper_encode(
    ggml_permute(ctxL,
        ggml_reshape_3d(ctxL,
            Vcur,
-           n_state/n_head, n_head, N),
+           n_state/n_head, n_head, n_ctx),
        1, 2, 0, 3),
-   ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, N, n_state/n_head, n_head)
+   ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_ctx, n_state/n_head, n_head)
    );

struct ggml_tensor * KQV = ggml_flash_attn(ctxL, Q, K, V, false);

@ -1224,14 +1248,14 @@ static bool whisper_encode(
    ggml_permute(ctxL,
        ggml_cpy(ctxL,
            Qcur,
-           ggml_new_tensor_3d(ctxL, GGML_TYPE_F32, n_state/n_head, n_head, N)),
+           ggml_new_tensor_3d(ctxL, GGML_TYPE_F32, n_state/n_head, n_head, n_ctx)),
        0, 2, 1, 3);

struct ggml_tensor * K =
    ggml_permute(ctxL,
        ggml_cpy(ctxL,
            Kcur,
-           ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, N)),
+           ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)),
        0, 2, 1, 3);

// K * Q

@ -1249,7 +1273,7 @@ static bool whisper_encode(
    // ggml_permute(ctxL,
    //     ggml_cpy(ctxL,
    //         Vcur,
-   //         ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, N)),
+   //         ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_head, n_ctx)),
    //     1, 2, 0, 3);

    //struct ggml_tensor * KQV = ggml_mul_mat(ctxL, V_trans, KQ_soft_max);

@ -1259,9 +1283,9 @@ static bool whisper_encode(
    ggml_permute(ctxL,
        ggml_reshape_3d(ctxL,
            Vcur,
-           n_state/n_head, n_head, N),
+           n_state/n_head, n_head, n_ctx),
        0, 2, 1, 3),
-   ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, N, n_head)
+   ggml_new_tensor_3d(ctxL, GGML_TYPE_F16, n_state/n_head, n_ctx, n_head)
    );

struct ggml_tensor * KQV = ggml_mul_mat(ctxL, ggml_transpose(ctxL, V), KQ_soft_max);

@ -1271,7 +1295,7 @@ static bool whisper_encode(

    cur = ggml_cpy(ctxL,
        KQV_merged,
-       ggml_new_tensor_2d(ctxL, GGML_TYPE_F32, n_state, N));
+       ggml_new_tensor_2d(ctxL, GGML_TYPE_F32, n_state, n_ctx));
}

// projection
@ -1425,6 +1449,8 @@ static bool whisper_encode(
                Vcross),
            Vcross);

+   //struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, (ggml_element_size(model.memory_cross_k)*n_state)*(il*hparams.n_audio_ctx + iter*n_ctx));
+   //struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, (ggml_element_size(model.memory_cross_v)*n_state)*(il*hparams.n_audio_ctx + iter*n_ctx));
    struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, (ggml_element_size(model.memory_cross_k)*n_state)*(il*n_ctx));
    struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, (ggml_element_size(model.memory_cross_v)*n_state)*(il*n_ctx));
@ -1474,7 +1500,7 @@ static bool whisper_decode(
    const int n_layer = hparams.n_text_layer;

    const int N = n_tokens;
-   const int M = hparams.n_audio_ctx;
+   const int M = wctx.exp_n_audio_ctx > 0 ? wctx.exp_n_audio_ctx : hparams.n_audio_ctx;

    struct ggml_init_params params = {
        .mem_size   = wctx.buf_compute.size(),
@ -2031,6 +2057,7 @@ static bool log_mel_spectrogram(
    const int n_mel,
    const int n_threads,
    const whisper_filters & filters,
+   const bool speed_up,
    whisper_mel & mel) {

    // Hanning window

@ -2044,7 +2071,7 @@ static bool log_mel_spectrogram(
    mel.n_len = (n_samples)/fft_step;
    mel.data.resize(mel.n_mel*mel.n_len);

-   const int n_fft = 1 + fft_size/2;
+   const int n_fft = 1 + (speed_up ? fft_size/4 : fft_size/2);

    //printf("%s: n_samples = %d, n_len = %d\n", __func__, n_samples, mel.n_len);
    //printf("%s: recording length: %f s\n", __func__, (float) n_samples/sample_rate);

@ -2091,6 +2118,13 @@ static bool log_mel_spectrogram(
        //}
    }

+   if (speed_up) {
+       // scale down in the frequency domain results in a speed up in the time domain
+       for (int j = 0; j < n_fft; j++) {
+           fft_out[j] = 0.5*(fft_out[2*j] + fft_out[2*j + 1]);
+       }
+   }

    // mel spectrogram
    for (int j = 0; j < mel.n_mel; j++) {
        double sum = 0.0;

@ -2171,7 +2205,21 @@ void whisper_free(struct whisper_context * ctx) {

int whisper_pcm_to_mel(struct whisper_context * ctx, const float * samples, int n_samples, int n_threads) {
    const int64_t t_start_us = ggml_time_us();

-   if (!log_mel_spectrogram(samples, n_samples, WHISPER_SAMPLE_RATE, WHISPER_N_FFT, WHISPER_HOP_LENGTH, WHISPER_N_MEL, n_threads, ctx->model.filters, ctx->mel)) {
+   if (!log_mel_spectrogram(samples, n_samples, WHISPER_SAMPLE_RATE, WHISPER_N_FFT, WHISPER_HOP_LENGTH, WHISPER_N_MEL, n_threads, ctx->model.filters, false, ctx->mel)) {
        fprintf(stderr, "%s: failed to compute mel spectrogram\n", __func__);
        return -1;
    }

    ctx->t_mel_us = ggml_time_us() - t_start_us;

    return 0;
}

+// same as whisper_pcm_to_mel, but applies a Phase Vocoder to speed up the audio x2
+int whisper_pcm_to_mel_phase_vocoder(struct whisper_context * ctx, const float * samples, int n_samples, int n_threads) {
+   const int64_t t_start_us = ggml_time_us();
+
+   if (!log_mel_spectrogram(samples, n_samples, WHISPER_SAMPLE_RATE, 2*WHISPER_N_FFT, 2*WHISPER_HOP_LENGTH, WHISPER_N_MEL, n_threads, ctx->model.filters, true, ctx->mel)) {
+       fprintf(stderr, "%s: failed to compute mel spectrogram\n", __func__);
+       return -1;
+   }
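The 2x speed-up works entirely in the spectrogram: the FFT size and hop length are doubled (2*WHISPER_N_FFT, 2*WHISPER_HOP_LENGTH) and adjacent frequency bins are averaged (fft_out[j] = 0.5*(fft_out[2j] + fft_out[2j+1])), so the mel filter bank sees frames that each cover twice as much audio. The model therefore processes half as many frames per recording, and the reported timestamps are scaled back by 2 afterwards (the tt0 = 2*t0 lines further below).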
@ -2339,9 +2387,11 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
    /*.n_threads            =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
    /*.n_max_text_ctx       =*/ 16384,
    /*.offset_ms            =*/ 0,
+   /*.duration_ms          =*/ 0,

    /*.translate            =*/ false,
    /*.no_context           =*/ false,
+   /*.single_segment       =*/ false,
    /*.print_special_tokens =*/ false,
    /*.print_progress       =*/ true,
    /*.print_realtime       =*/ false,

@ -2351,6 +2401,10 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
    /*.thold_pt             =*/ 0.01f,
    /*.thold_ptsum          =*/ 0.01f,
    /*.max_len              =*/ 0,
+   /*.max_tokens           =*/ 0,
+
+   /*.speed_up             =*/ false,
+   /*.audio_ctx            =*/ 0,

    /*.language             =*/ "en",

@ -2376,9 +2430,11 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
    /*.n_threads            =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
    /*.n_max_text_ctx       =*/ 16384,
    /*.offset_ms            =*/ 0,
+   /*.duration_ms          =*/ 0,

    /*.translate            =*/ false,
    /*.no_context           =*/ false,
+   /*.single_segment       =*/ false,
    /*.print_special_tokens =*/ false,
    /*.print_progress       =*/ true,
    /*.print_realtime       =*/ false,

@ -2388,6 +2444,10 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
    /*.thold_pt             =*/ 0.01f,
    /*.thold_ptsum          =*/ 0.01f,
    /*.max_len              =*/ 0,
+   /*.max_tokens           =*/ 0,
+
+   /*.speed_up             =*/ false,
+   /*.audio_ctx            =*/ 0,

    /*.language             =*/ "en",
@ -2483,9 +2543,16 @@ int whisper_full(
    result_all.clear();

    // compute log mel spectrogram
-   if (whisper_pcm_to_mel(ctx, samples, n_samples, params.n_threads) != 0) {
-       fprintf(stderr, "%s: failed to compute log mel spectrogram\n", __func__);
-       return -1;
+   if (params.speed_up) {
+       if (whisper_pcm_to_mel_phase_vocoder(ctx, samples, n_samples, params.n_threads) != 0) {
+           fprintf(stderr, "%s: failed to compute log mel spectrogram\n", __func__);
+           return -1;
+       }
+   } else {
+       if (whisper_pcm_to_mel(ctx, samples, n_samples, params.n_threads) != 0) {
+           fprintf(stderr, "%s: failed to compute log mel spectrogram\n", __func__);
+           return -1;
+       }
    }

    if (params.token_timestamps) {

@ -2496,11 +2563,12 @@ int whisper_full(
    }

    const int seek_start = params.offset_ms/10;
+   const int seek_end = seek_start + (params.duration_ms == 0 ? whisper_n_len(ctx) : params.duration_ms/10);

    // if length of spectrogram is less than 1s (100 samples), then return
    // basically don't process anything that is less than 1s
    // see issue #39: https://github.com/ggerganov/whisper.cpp/issues/39
-   if (whisper_n_len(ctx) < 100 + seek_start) {
+   if (seek_end < 100 + seek_start) {
        return 0;
    }
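Spectrogram frames are 10 ms each (WHISPER_HOP_LENGTH is 160 samples at 16 kHz), which is why offset_ms and duration_ms are divided by 10 to get frame indices, and why 100 frames is the 1-second minimum. For example, --offset-t 5000 --duration 10000 processes frames 500 through 1500.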
@ -2510,6 +2578,9 @@ int whisper_full(
        prompt_past.clear();
    }

+   // overwrite audio_ctx
+   ctx->exp_n_audio_ctx = params.audio_ctx;

    // these tokens determine the task that will be performed
    std::vector<whisper_token> prompt_init = { whisper_token_sot(ctx) };
    if (whisper_is_multilingual(ctx)) {
@ -2533,7 +2604,7 @@ int whisper_full(
    // main loop
    int seek = seek_start;
    while (true) {
-       int progress_cur = (100*seek)/whisper_n_len(ctx);
+       const int progress_cur = (100*(seek - seek_start))/(seek_end - seek_start);
        while (progress_cur >= progress_prev + progress_step) {
            progress_prev += progress_step;
            if (params.print_progress) {

@ -2541,7 +2612,7 @@ int whisper_full(
        }
    }

-   if (seek + 100 >= whisper_n_len(ctx)) {
+   if (seek + 100 >= seek_end) {
        break;
    }
@ -2620,15 +2691,21 @@ int whisper_full(
            //}

            // end of text token
-           if (token.id == whisper_token_eot(ctx)) {
+           if (token.id == whisper_token_eot(ctx) || (params.max_tokens > 0 && i > params.max_tokens)) {
                if (result_len == 0) {
-                   if (seek + seek_delta + 100 >= whisper_n_len(ctx)) {
+                   if (seek + seek_delta + 100 >= seek_end) {
                        result_len = i + 1;
                    } else {
                        // TODO: figure out how to resolve this
                        fprintf(stderr, "\n%s: failed to generate timestamp token - this should not happen\n\n", __func__);
                    }
                }

+               if (params.single_segment) {
+                   result_len = i + 1;
+                   seek_delta = 100*WHISPER_CHUNK_SIZE;
+               }

                break;
            }
@ -2670,16 +2747,19 @@ int whisper_full(
            if (tokens_cur[i].id > whisper_token_beg(ctx)) {
                const auto t1 = seek + 2*(tokens_cur[i].tid - whisper_token_beg(ctx));
                if (!text.empty()) {
+                   const auto tt0 = params.speed_up ? 2*t0 : t0;
+                   const auto tt1 = params.speed_up ? 2*t1 : t1;

                    if (params.print_realtime) {
                        if (params.print_timestamps) {
-                           printf("[%s --> %s]  %s\n", to_timestamp(t0).c_str(), to_timestamp(t1).c_str(), text.c_str());
+                           printf("[%s --> %s]  %s\n", to_timestamp(tt0).c_str(), to_timestamp(tt1).c_str(), text.c_str());
                        } else {
                            printf("%s", text.c_str());
                            fflush(stdout);
                        }
                    }

-                   result_all.push_back({ t0, t1, text, {} });
+                   result_all.push_back({ tt0, tt1, text, {} });
                    for (int j = i0; j <= i; j++) {
                        result_all.back().tokens.push_back(tokens_cur[j]);
                    }
@ -2711,16 +2791,19 @@ int whisper_full(
        if (!text.empty()) {
            const auto t1 = seek + seek_delta;

+           const auto tt0 = params.speed_up ? 2*t0 : t0;
+           const auto tt1 = params.speed_up ? 2*t1 : t1;

            if (params.print_realtime) {
                if (params.print_timestamps) {
-                   printf("[%s --> %s]  %s\n", to_timestamp(t0).c_str(), to_timestamp(t1).c_str(), text.c_str());
+                   printf("[%s --> %s]  %s\n", to_timestamp(tt0).c_str(), to_timestamp(tt1).c_str(), text.c_str());
                } else {
                    printf("%s", text.c_str());
                    fflush(stdout);
                }
            }

-           result_all.push_back({ t0, t1, text, {} });
+           result_all.push_back({ tt0, tt1, text, {} });
            for (int j = i0; j < (int) tokens_cur.size(); j++) {
                result_all.back().tokens.push_back(tokens_cur[j]);
            }
@ -2802,7 +2885,7 @@ int whisper_full_parallel(

    // key/value memory for the cross-attention layer
    {
-       const int n_audio_ctx = hparams.n_audio_ctx;
+       const int n_audio_ctx   = hparams.n_audio_ctx;

        const int n_mem      = n_text_layer*n_audio_ctx;
        const int n_elements = n_text_state*n_mem;
|
@ -186,10 +186,12 @@ extern "C" {
|
||||
|
||||
int n_threads;
|
||||
int n_max_text_ctx;
|
||||
int offset_ms;
|
||||
int offset_ms; // start offset in ms
|
||||
int duration_ms; // audio duration to process in ms
|
||||
|
||||
bool translate;
|
||||
bool no_context;
|
||||
bool single_segment; // force single segment output (useful for streaming)
|
||||
bool print_special_tokens;
|
||||
bool print_progress;
|
||||
bool print_realtime;
|
||||
@ -200,6 +202,11 @@ extern "C" {
|
||||
float thold_pt; // timestamp token probability threshold (~0.01)
|
||||
float thold_ptsum; // timestamp token sum probability threshold (~0.01)
|
||||
int max_len; // max segment length in characters
|
||||
int max_tokens; // max tokens per segment (0 = no limit)
|
||||
|
||||
// [EXPERIMENTAL] speed-up techniques
|
||||
bool speed_up; // speed-up the audio by 2x using Phase Vocoder
|
||||
int audio_ctx; // overwrite the audio context size (0 = use default)
|
||||
|
||||
const char * language;
|
||||
|
||||
|
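Taken together, the new fields slot into the existing whisper_full() call. A minimal sketch, assuming the model is already loaded and pcmf32 holds 16 kHz mono float samples (only the parameters shown come from this diff; the driver function itself is hypothetical):

    #include <vector>
    #include "whisper.h"

    int transcribe(struct whisper_context * ctx, const std::vector<float> & pcmf32) {
        whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);

        wparams.offset_ms      = 0;     // start at the beginning ...
        wparams.duration_ms    = 10000; // ... and process only the first 10 s
        wparams.single_segment = false;
        wparams.max_tokens     = 0;     // 0 = no per-segment token limit

        // [EXPERIMENTAL] trade accuracy for speed
        wparams.speed_up  = true;       // 2x phase-vocoder speed-up
        wparams.audio_ctx = 0;          // 0 = keep the default encoder context

        return whisper_full(ctx, wparams, pcmf32.data(), (int) pcmf32.size());
    }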