Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-06-24 17:15:19 +00:00)

Compare commits: ggml-conv...parallel-s (12 commits)
Commits (SHA1):

a2f3b82db3
76c8b5235b
d029784fb0
40c66036b6
fc8565d0e2
b618229340
b27726da93
0867e696a7
66bb2e9401
3bfc43e3e3
f53e1388f5
933c5bef97

.gitignore (vendored, 1 line changed)

@@ -8,6 +8,7 @@
.DS_Store

build/
build-coreml/
build-em/
build-debug/
build-release/

@@ -15,33 +15,13 @@ declare -a filedex
 cd `dirname $0`
 cd ../

-# Let's loop across all the objects in the 'models' dir:
-for i in ./models/*; do
-    # Check to see if it's a file or directory
-    if [ -d "$i" ]; then
-        # It's a directory! We should make sure it's not empty first:
-        if [ "$(ls -A $i)" ]; then
-            # Passed! Let's go searching for bin files (shouldn't need to go more than a layer deep here)
-            for f in "$i"/*.bin; do
-                # [Neuron Activation]
-                newfile=`echo "${f##*/}" | cut -d _ -f 1`;
-                if [ "$newfile" != "q5" ]; then
-                    ./quantize "${f}" "${i:-4}/${i:9:${#i}-4}-${qtype1}.bin" ${qtype1};
-                    ./quantize "${f}" "${i:-4}/${i:9:${#i}-4}-${qtype0}.bin" ${qtype0};
-                    filedex+=( "${i:-4}/${i:9:${#i}-4}-${qtype1}.bin" "${i:-4}/${i:9:${#i}-4}-${qtype0}.bin" )
-                fi
-            done
-        fi
-    else
-        # It's a file! Let's make sure it's the right type:
-        if [ "${i##*.}" == "bin" ]; then
-            # And we probably want to skip the testing files
-            if [ "${i:9:8}" != "for-test" ]; then
-                # [Neuron Activation]
-                ./quantize "${i}" "${i:-4}-${qtype1}.bin" ${qtype1};
-                ./quantize "${i}" "${i:-4}-${qtype0}.bin" ${qtype0};
-                filedex+=( "${i:-4}-${qtype1}.bin" "${i:-4}-${qtype0}.bin" )
-            fi
+for i in `ls ./models | grep ^ggml-.*.bin | grep -v "\-q"`; do
+    m="models/$i"
+    if [ -f "$m" ]; then
+        if [ "${m##*.}" == "bin" ]; then
+            ./quantize "${m}" "${m::${#m}-4}-${qtype1}.bin" ${qtype1};
+            ./quantize "${m}" "${m::${#m}-4}-${qtype0}.bin" ${qtype0};
+            filedex+=( "${m::${#m}-4}-${qtype1}.bin" "${m::${#m}-4}-${qtype0}.bin" )
+        fi
         fi
     fi
 done

@@ -4747,11 +4747,11 @@ static __global__ void im2col_f32_f16(
         (threadIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z) * CHW +
         (blockIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * blockDim.z + threadIdx.z);

-    if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) {
+    if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
+        dst[offset_dst] = __float2half(0.0f);
+    } else {
         const int offset_src = threadIdx.x * ofs0 + blockIdx.x * ofs1;
         dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]);
-    } else {
-        dst[offset_dst] = __float2half(0.0f);
     }
 }

@@ -26,7 +26,7 @@
 #include <stdbool.h>

 // max memory buffers that can be mapped to the device
-#define GGML_METAL_MAX_BUFFERS 16
+#define GGML_METAL_MAX_BUFFERS 64
 #define GGML_METAL_MAX_COMMAND_BUFFERS 32

 struct ggml_tensor;

@@ -479,6 +479,10 @@ static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, stru
     const int64_t tsize = ggml_nbytes(t);

+    if (t->buffer && t->buffer->backend && t->buffer->backend->context) {
+        ctx = t->buffer->backend->context;
+    }
+
     // find the view that contains the tensor fully
     for (int i = 0; i < ctx->n_buffers; ++i) {
         const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;

@@ -1327,11 +1327,11 @@ kernel void kernel_im2col_f16(
         (tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW +
         (tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]);

-    if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) {
+    if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
+        dst[offset_dst] = 0.0f;
+    } else {
         const int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1;
         dst[offset_dst] = x[offset_src + iih * IW + iiw];
-    } else {
-        dst[offset_dst] = 0.0f;
     }
 }

ggml.c (22 lines changed)

@@ -1777,7 +1777,6 @@ static void ggml_setup_op_has_task_pass(void) {
         p[GGML_OP_DIAG_MASK_INF     ] = true;
         p[GGML_OP_DIAG_MASK_ZERO    ] = true;
         p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
-        p[GGML_OP_IM2COL            ] = true;
         p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
         p[GGML_OP_FLASH_ATTN_BACK   ] = true;
         p[GGML_OP_CROSS_ENTROPY_LOSS] = true;

@@ -5122,8 +5121,6 @@ static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p,
    return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}

// ggml_conv_1d

GGML_API struct ggml_tensor * ggml_conv_1d(
        struct ggml_context * ctx,
        struct ggml_tensor * a,

@@ -5263,14 +5260,14 @@ struct ggml_tensor * ggml_conv_2d(
         int p1,
         int d0,
         int d1) {
-    struct ggml_tensor * result = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true); // [N, OH, OW, IC * KH * KW]
+    struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, s1, p0, p1, d0, d1, true); // [N, OH, OW, IC * KH * KW]

-    result =
-        ggml_reshape_4d(ctx,
-            ggml_mul_mat(ctx,
-                ggml_reshape_2d(ctx, result, result->ne[0], result->ne[3] * result->ne[2] * result->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
-                ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3])),                       // [OC,IC, KH, KW] => [OC, IC * KH * KW]
-            result->ne[1], result->ne[2], a->ne[3], result->ne[3]); // [N, OC, OH, OW]
+    struct ggml_tensor * result =
+        ggml_mul_mat(ctx,
+            ggml_reshape_2d(ctx, im2col, im2col->ne[0], im2col->ne[3] * im2col->ne[2] * im2col->ne[1]), // [N, OH, OW, IC * KH * KW] => [N*OH*OW, IC * KH * KW]
+            ggml_reshape_2d(ctx, a, (a->ne[0] * a->ne[1] * a->ne[2]), a->ne[3]));                       // [OC,IC, KH, KW] => [OC, IC * KH * KW]
+
+    result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], a->ne[3], im2col->ne[3]); // [N, OC, OH, OW]

     return result;
 }

@@ -11757,7 +11754,6 @@ static void ggml_compute_forward_im2col_f16(
     GGML_ASSERT(nb10 == sizeof(float));

     if (params->type == GGML_TASK_INIT) {
-        memset(dst->data, 0, ggml_nbytes(dst));
         return;
     }


@@ -11783,7 +11779,9 @@ static void ggml_compute_forward_im2col_f16(
                         const int64_t iiw = iow*s0 + ikw*d0 - p0;
                         const int64_t iih = ioh*s1 + ikh*d1 - p1;

-                        if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) {
+                        if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
+                            dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0;
+                        } else {
                             dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
                         }
                     }
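
Taken together with the CUDA and Metal kernels earlier in this compare, the ggml.c hunks above make im2col write its zero padding explicitly instead of relying on a prior memset, and express ggml_conv_2d as im2col followed by a single matrix multiplication and a reshape. For reference, here is a minimal, dependency-free C++ sketch of that decomposition for one image (N = 1); the helper names and the check in main() are illustrative only and are not part of the ggml API:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // same output-size formula as ggml_calc_conv_output_size above
    static int conv_out_size(int in, int ks, int s, int p, int d) {
        return (in + 2*p - d*(ks - 1) - 1)/s + 1;
    }

    // im2col for one image: x is [IC, IH, IW], result is [OH*OW, IC*KH*KW];
    // out-of-bounds taps are written as 0, i.e. the padding branch comes first,
    // as in the reworked kernels above
    static std::vector<float> im2col(const std::vector<float> & x,
            int IC, int IH, int IW, int KH, int KW, int s, int p, int d,
            int & OH, int & OW) {
        OH = conv_out_size(IH, KH, s, p, d);
        OW = conv_out_size(IW, KW, s, p, d);
        std::vector<float> cols((size_t) OH*OW*IC*KH*KW, 0.0f);
        for (int oh = 0; oh < OH; ++oh)
        for (int ow = 0; ow < OW; ++ow)
        for (int ic = 0; ic < IC; ++ic)
        for (int kh = 0; kh < KH; ++kh)
        for (int kw = 0; kw < KW; ++kw) {
            const int iih = oh*s + kh*d - p;
            const int iiw = ow*s + kw*d - p;
            const size_t idst = ((size_t) oh*OW + ow)*(IC*KH*KW) + (ic*KH + kh)*KW + kw;
            if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
                cols[idst] = 0.0f; // zero padding, written explicitly
            } else {
                cols[idst] = x[(ic*IH + iih)*IW + iiw];
            }
        }
        return cols;
    }

    int main() {
        // 1 channel, 4x4 input, 3x3 all-ones kernel, stride 1, padding 1 -> 4x4 output
        const int IC = 1, IH = 4, IW = 4, KH = 3, KW = 3, OC = 1;
        const std::vector<float> x(IC*IH*IW, 1.0f);
        const std::vector<float> w(OC*IC*KH*KW, 1.0f);

        int OH = 0, OW = 0;
        const std::vector<float> cols = im2col(x, IC, IH, IW, KH, KW, 1, 1, 1, OH, OW);

        // GEMM: kernel [OC, IC*KH*KW] times patches [OH*OW, IC*KH*KW]^T -> output [OC, OH*OW]
        std::vector<float> y((size_t) OC*OH*OW, 0.0f);
        for (int oc = 0; oc < OC; ++oc)
        for (int o = 0; o < OH*OW; ++o)
        for (int k = 0; k < IC*KH*KW; ++k)
            y[(size_t) oc*OH*OW + o] += w[(size_t) oc*IC*KH*KW + k]*cols[(size_t) o*IC*KH*KW + k];

        printf("output is %dx%d, center value = %.1f (expected 9.0)\n", OH, OW, y[(OH/2)*OW + OW/2]);
        return 0;
    }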

whisper.cpp (255 lines changed)

@@ -155,8 +155,8 @@ static void whisper_log_callback_default(ggml_log_level level, const char * text
 //

 static void ggml_graph_compute_helper(
+        struct ggml_cgraph * graph,
         std::vector<uint8_t> & buf,
-        ggml_cgraph * graph,
         int n_threads,
         whisper_abort_callback abort_callback,
         void * abort_callback_data) {

@@ -173,6 +173,21 @@ static void ggml_graph_compute_helper(
     ggml_graph_compute(graph, &plan);
 }

+static void ggml_graph_compute_helper(
+        struct ggml_backend * backend,
+        struct ggml_cgraph * graph,
+        int n_threads) {
+    if (ggml_backend_is_cpu(backend)) {
+        ggml_backend_cpu_set_n_threads(backend, n_threads);
+    }
+#ifdef GGML_USE_METAL
+    if (ggml_backend_is_metal(backend)) {
+        ggml_backend_metal_set_n_cb(backend, n_threads);
+    }
+#endif
+    ggml_backend_graph_compute(backend, graph);
+}
+
 // faster matrix multiplications for tensors that do not have dimension 0 divisible by "pad"
 // the idea is to represent the original matrix multiplication:
 //

@@ -207,6 +222,7 @@ static struct ggml_tensor * ggml_mul_mat_pad(struct ggml_context * ctx, struct g
 }

 // TODO: check if other platforms can benefit from this optimization
+// TODO: CUDA is currently broken - seems ggml_mul_mat does not handle views correctly
 #if defined(GGML_USE_METAL)
 #define ggml_mul_mat ggml_mul_mat_pad
 #endif
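
The comment block above ggml_mul_mat_pad is cut off in this view, but the visible part describes rewriting a matrix multiplication so that dimension 0 (the shared dimension) does not need to be a multiple of "pad". One standard way to do that, and the identity any such split relies on, is to compute Z = X*Y as the sum of two products over a partition of the shared dimension. A small standalone C++ check of that identity (illustrative only; it does not use ggml):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // C += A (M x K) * B (K x N), accumulating only over the sub-range [k0, k1) of the shared dimension
    static void matmul_range(const std::vector<float> & a, const std::vector<float> & b,
            std::vector<float> & c, int M, int K, int N, int k0, int k1) {
        for (int m = 0; m < M; ++m)
        for (int n = 0; n < N; ++n)
        for (int k = k0; k < k1; ++k)
            c[(size_t) m*N + n] += a[(size_t) m*K + k]*b[(size_t) k*N + n];
    }

    int main() {
        const int M = 2, K = 37, N = 3, pad = 32;   // K is not a multiple of pad
        const int K0 = (K/pad)*pad;                 // 32: the part of K divisible by pad
        std::vector<float> a((size_t) M*K), b((size_t) K*N);
        for (size_t i = 0; i < a.size(); ++i) a[i] = 0.01f*(float) i;
        for (size_t i = 0; i < b.size(); ++i) b[i] = 0.02f*(float) i;

        std::vector<float> z_full((size_t) M*N, 0.0f);
        matmul_range(a, b, z_full, M, K, N, 0, K);      // Z = X * Y in one pass

        std::vector<float> z_split((size_t) M*N, 0.0f);
        matmul_range(a, b, z_split, M, K, N, 0, K0);    // Z  = X_0 * Y_0
        matmul_range(a, b, z_split, M, K, N, K0, K);    // Z += X_1 * Y_1 (the remainder)

        printf("full = %.4f, split = %.4f\n", (double) z_full[0], (double) z_split[0]); // identical
        return 0;
    }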

@@ -333,75 +349,6 @@ static const std::map<std::string, std::pair<int, std::string>> g_lang = {
     { "yue", { 99, "cantonese", } },
 };

-static const size_t MB = 1ull*1024*1024;
-
-// TODO: avoid using GGUF
-static const std::map<ggml_type, std::map<e_model, size_t>> MEM_REQ_MODEL = {
-    { GGML_TYPE_F32,
-        {
-            { MODEL_TINY,     74ull*MB },
-            { MODEL_BASE,    142ull*MB },
-            { MODEL_SMALL,   466ull*MB },
-            { MODEL_MEDIUM, 1464ull*MB },
-            { MODEL_LARGE,  2952ull*MB },
-        },
-    },
-    { GGML_TYPE_F16,
-        {
-            { MODEL_TINY,     74ull*MB },
-            { MODEL_BASE,    142ull*MB },
-            { MODEL_SMALL,   466ull*MB },
-            { MODEL_MEDIUM, 1464ull*MB },
-            { MODEL_LARGE,  2952ull*MB },
-        },
-    },
-    { GGML_TYPE_Q4_0,
-        {
-            { MODEL_TINY,     26ull*MB },
-            { MODEL_BASE,     50ull*MB },
-            { MODEL_SMALL,   154ull*MB },
-            { MODEL_MEDIUM,  470ull*MB },
-            { MODEL_LARGE,   940ull*MB },
-        },
-    },
-    { GGML_TYPE_Q4_1,
-        {
-            { MODEL_TINY,     32ull*MB },
-            { MODEL_BASE,     58ull*MB },
-            { MODEL_SMALL,   182ull*MB },
-            { MODEL_MEDIUM,  562ull*MB },
-            { MODEL_LARGE,  1124ull*MB },
-        },
-    },
-    { GGML_TYPE_Q5_0,
-        {
-            { MODEL_TINY,     30ull*MB },
-            { MODEL_BASE,     54ull*MB },
-            { MODEL_SMALL,   170ull*MB },
-            { MODEL_MEDIUM,  516ull*MB },
-            { MODEL_LARGE,  1034ull*MB },
-        },
-    },
-    { GGML_TYPE_Q5_1,
-        {
-            { MODEL_TINY,     32ull*MB },
-            { MODEL_BASE,     58ull*MB },
-            { MODEL_SMALL,   182ull*MB },
-            { MODEL_MEDIUM,  562ull*MB },
-            { MODEL_LARGE,  1124ull*MB },
-        },
-    },
-    { GGML_TYPE_Q8_0,
-        {
-            { MODEL_TINY,     45ull*MB },
-            { MODEL_BASE,     84ull*MB },
-            { MODEL_SMALL,   268ull*MB },
-            { MODEL_MEDIUM,  834ull*MB },
-            { MODEL_LARGE,  1674ull*MB },
-        },
-    },
-};
-
 struct whisper_mel {
     int n_len;
     int n_len_org;

@@ -587,10 +534,6 @@ struct whisper_kv_cache {
     int n; // number of tokens currently in the cache
 };

-struct whisper_model_data {
-    ggml_backend_buffer_t buffer_main;
-};
-
 struct whisper_model {
     e_model type = MODEL_UNKNOWN;


@@ -625,11 +568,11 @@ struct whisper_model {
     std::vector<whisper_layer_encoder> layers_encoder;
     std::vector<whisper_layer_decoder> layers_decoder;

-    // context
+    // ggml context that contains all the meta information about the model tensors
     struct ggml_context * ctx;

     // the model backend data is read-only and can be shared between processors
-    struct whisper_model_data * data;
+    struct ggml_backend_buffer * buffer;

     // tensors
     int n_loaded;

@@ -699,26 +642,36 @@ struct whisper_allocr {
 };

 static size_t whisper_allocr_size(struct whisper_allocr & allocr) {
-    return allocr.meta.size() + ggml_backend_buffer_get_size(allocr.buffer);
+    return allocr.meta.size() + ggml_allocr_max_size(allocr.alloc);
 }

 // measure the memory usage of a graph and prepare the allocr's internal data buffer
 static void whisper_allocr_graph_init(struct whisper_allocr & allocr, ggml_backend_t backend, std::function<struct ggml_cgraph *()> && get_graph) {
     auto & alloc  = allocr.alloc;
     auto & meta   = allocr.meta;
-    auto & buffer = allocr.buffer;

-    const int tensor_alignment = ggml_backend_get_alignment(backend);
-    alloc = ggml_allocr_new_measure(tensor_alignment);
+    alloc = ggml_allocr_new_measure_from_backend(backend);

     meta.resize(ggml_tensor_overhead()*WHISPER_MAX_NODES + ggml_graph_overhead());

-    const size_t alloc_size = ggml_allocr_alloc_graph(alloc, get_graph()) + tensor_alignment;
+    ggml_allocr_alloc_graph(alloc, get_graph());
+}
+
+static void whisper_allocr_graph_realloc(struct whisper_allocr & allocr, ggml_backend_t backend) {
+    if (allocr.alloc == nullptr) {
+        // this can be null if we use external encoder like CoreML or OpenVINO
+        return;
+    }
+
+    auto & alloc  = allocr.alloc;
+    auto & buffer = allocr.buffer;
+
+    size_t size = ggml_allocr_max_size(alloc);

     ggml_allocr_free(alloc);

-    buffer = ggml_backend_alloc_buffer(backend, alloc_size);
-    alloc = ggml_allocr_new_from_buffer(buffer);
+    buffer = ggml_backend_alloc_buffer(backend, size);
+    alloc = ggml_allocr_new_from_buffer(buffer);
 }

 static void whisper_allocr_free(struct whisper_allocr & allocr) {
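
The two helpers above split allocation into a measuring pass and a real pass: whisper_allocr_graph_init runs the graph through a measuring allocator (ggml_allocr_new_measure_from_backend) so that only the peak size is recorded, and whisper_allocr_graph_realloc later frees the measuring allocator, allocates one backend buffer of exactly ggml_allocr_max_size(alloc) bytes, and creates a new allocator from it, which the encode/decode paths then use via ggml_allocr_alloc_graph further down in this diff. A toy C++ arena showing the same measure-then-replay idea in isolation (illustrative only; this is not ggml-alloc):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // a toy arena: in "measure" mode it only tracks the peak offset,
    // in normal mode it hands out pointers into a preallocated buffer
    struct toy_allocr {
        bool      measure  = true;
        size_t    offset   = 0;
        size_t    max_size = 0;
        uint8_t * base     = nullptr;

        void * alloc(size_t n) {
            void * p = nullptr;
            if (!measure) {
                p = base + offset;
            }
            offset += n;
            if (offset > max_size) {
                max_size = offset;
            }
            return p;
        }
    };

    // stand-in for "allocate every intermediate tensor of the graph"
    static void alloc_graph(toy_allocr & a) {
        a.offset = 0;
        a.alloc(1024);  // e.g. conv output
        a.alloc(4096);  // e.g. attention scratch
        a.alloc(512);   // e.g. logits
    }

    int main() {
        toy_allocr a;                               // 1) measure pass (like whisper_allocr_graph_init)
        alloc_graph(a);

        std::vector<uint8_t> buffer(a.max_size);    // 2) one real buffer of the measured size
        a.measure = false;                          //    (like whisper_allocr_graph_realloc)
        a.base    = buffer.data();

        alloc_graph(a);                             // 3) the real allocations now land in the buffer
        printf("compute buffer size = %zu bytes\n", a.max_size);
        return 0;
    }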

@@ -753,6 +706,8 @@ struct whisper_state {
     // buffer for swapping KV caches between decoders during beam-search
     std::vector<kv_buf> kv_swap_bufs;

+    ggml_backend_t backend = nullptr;
+
     // ggml-alloc:
     // - stores meta info about the intermediate tensors into the `meta` buffers
     // - stores the actual tensor data into the `data` buffers

@@ -765,7 +720,7 @@
     struct ggml_tensor * embd_conv = nullptr;
     struct ggml_tensor * embd_enc = nullptr;

-    // TODO: helper until conv is implemented in CUDA
+    // helper for GPU offloading
     std::vector<float> inp_mel;

     // decode output (2-dimensional array: [n_tokens][n_vocab])

@@ -932,6 +887,37 @@ static void kv_cache_free(struct whisper_kv_cache & cache) {
     }
 }

+static ggml_backend_t whisper_backend_init(const whisper_context_params & params) {
+    ggml_backend_t backend_gpu = NULL;
+
+    // initialize the backends
+#ifdef GGML_USE_CUBLAS
+    if (params.use_gpu) {
+        WHISPER_LOG_INFO("%s: using CUDA backend\n", __func__);
+        backend_gpu = ggml_backend_cuda_init();
+        if (!backend_gpu) {
+            WHISPER_LOG_ERROR("%s: ggml_backend_cuda_init() failed\n", __func__);
+        }
+    }
+#endif
+
+#ifdef GGML_USE_METAL
+    if (params.use_gpu) {
+        WHISPER_LOG_INFO("%s: using Metal backend\n", __func__);
+        ggml_metal_log_set_callback(whisper_log_callback_default, nullptr);
+        backend_gpu = ggml_backend_metal_init();
+        if (!backend_gpu) {
+            WHISPER_LOG_ERROR("%s: ggml_backend_metal_init() failed\n", __func__);
+        }
+    }
+#endif
+
+    if (backend_gpu) {
+        return backend_gpu;
+    }
+    return ggml_backend_cpu_init();
+}
+
 // load the model from a ggml file
 //
 // file format:

@@ -1350,40 +1336,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         }
     }

-    // init backends
-    {
-        model.data = new whisper_model_data;
-
-        ggml_backend_t backend_gpu = NULL;
-
-        // initialize the backends
-#ifdef GGML_USE_CUBLAS
-        if (wctx.params.use_gpu > 0) {
-            WHISPER_LOG_INFO("%s: using CUDA backend\n", __func__);
-            backend_gpu = ggml_backend_cuda_init();
-            if (!backend_gpu) {
-                WHISPER_LOG_ERROR("%s: ggml_backend_cuda_init() failed\n", __func__);
-            }
-        }
-#endif
-
-#ifdef GGML_USE_METAL
-        if (wctx.params.use_gpu) {
-            WHISPER_LOG_INFO("%s: using Metal backend\n", __func__);
-            ggml_metal_log_set_callback(whisper_log_callback_default, nullptr);
-            backend_gpu = ggml_backend_metal_init();
-            if (!backend_gpu) {
-                WHISPER_LOG_ERROR("%s: ggml_backend_metal_init() failed\n", __func__);
-            }
-        }
-#endif
-
-        if (backend_gpu) {
-            wctx.backend = backend_gpu;
-        } else {
-            wctx.backend = ggml_backend_cpu_init();
-        }
-    }
+    wctx.backend = whisper_backend_init(wctx.params);

     {
         size_t size_main = 0;

@@ -1392,17 +1345,17 @@
             size_main += ggml_nbytes(t.second) + ggml_tensor_overhead();
         }

-        model.data->buffer_main = ggml_backend_alloc_buffer(wctx.backend, size_main);
+        model.buffer = ggml_backend_alloc_buffer(wctx.backend, size_main);

         WHISPER_LOG_INFO("%s: %8s buffer size = %8.2f MB\n", __func__, ggml_backend_name(wctx.backend), size_main / 1024.0 / 1024.0);
     }

-    ggml_allocr * alloc_main = ggml_allocr_new_from_buffer(model.data->buffer_main);
+    ggml_allocr * alloc = ggml_allocr_new_from_buffer(model.buffer);

     // allocate tensors in the backend buffers
     {
         for (const auto & t : model.tensors) {
-            ggml_allocr_alloc(alloc_main, t.second);
+            ggml_allocr_alloc(alloc, t.second);
         }
     }

@@ -1524,7 +1477,7 @@ static bool whisper_model_load(struct whisper_model_loader * loader, whisper_con
         }
     }

-    ggml_allocr_free(alloc_main);
+    ggml_allocr_free(alloc);

     wctx.t_load_us = ggml_time_us() - t_start_us;


@@ -2017,15 +1970,7 @@ static bool whisper_encode_internal(
         ggml_allocr_alloc_graph(alloc, gf);

         if (!whisper_encode_external(wstate)) {
-            if (ggml_backend_is_cpu(wctx.backend)) {
-                ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
-            }
-#ifdef GGML_USE_METAL
-            if (ggml_backend_is_metal(wctx.backend)) {
-                ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
-            }
-#endif
-            ggml_backend_graph_compute(wctx.backend, gf);
+            ggml_graph_compute_helper(wstate.backend, gf, n_threads);
         }
     }


@@ -2039,15 +1984,7 @@ static bool whisper_encode_internal(

         ggml_allocr_alloc_graph(alloc, gf);

-        if (ggml_backend_is_cpu(wctx.backend)) {
-            ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
-        }
-#ifdef GGML_USE_METAL
-        if (ggml_backend_is_metal(wctx.backend)) {
-            ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
-        }
-#endif
-        ggml_backend_graph_compute(wctx.backend, gf);
+        ggml_graph_compute_helper(wstate.backend, gf, n_threads);
     }

     // cross

@@ -2060,15 +1997,7 @@ static bool whisper_encode_internal(

         ggml_allocr_alloc_graph(alloc, gf);

-        if (ggml_backend_is_cpu(wctx.backend)) {
-            ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
-        }
-#ifdef GGML_USE_METAL
-        if (ggml_backend_is_metal(wctx.backend)) {
-            ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
-        }
-#endif
-        ggml_backend_graph_compute(wctx.backend, gf);
+        ggml_graph_compute_helper(wstate.backend, gf, n_threads);
     }

     wstate.t_encode_us += ggml_time_us() - t_start_us;

@@ -2459,15 +2388,7 @@ static bool whisper_decode_internal(

         logits = gf->nodes[gf->n_nodes - 1];

-        if (ggml_backend_is_cpu(wctx.backend)) {
-            ggml_backend_cpu_set_n_threads(wctx.backend, n_threads);
-        }
-#ifdef GGML_USE_METAL
-        if (ggml_backend_is_metal(wctx.backend)) {
-            ggml_backend_metal_set_n_cb(wctx.backend, n_threads);
-        }
-#endif
-        ggml_backend_graph_compute(wctx.backend, gf);
+        ggml_graph_compute_helper(wstate.backend, gf, n_threads);
     }

     // extract logits for all N tokens

@@ -2910,6 +2831,8 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {

     whisper_state * state = new whisper_state;

+    state->backend = whisper_backend_init(ctx->params);
+
     if (!kv_cache_init(ctx->model.hparams, state->decoders[0].kv_self, ctx->backend, ctx->itype, ctx->model.hparams.n_text_ctx)) {
         WHISPER_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
         delete state;

@@ -3007,6 +2930,11 @@ struct whisper_state * whisper_init_state(whisper_context * ctx) {
         WHISPER_LOG_INFO("%s: compute buffer (decode) = %7.2f MB\n", __func__, whisper_allocr_size(state->alloc_decode) / 1024.0 / 1024.0);
     }

+    whisper_allocr_graph_realloc(state->alloc_conv,   ctx->backend);
+    whisper_allocr_graph_realloc(state->alloc_encode, ctx->backend);
+    whisper_allocr_graph_realloc(state->alloc_cross,  ctx->backend);
+    whisper_allocr_graph_realloc(state->alloc_decode, ctx->backend);
+
     state->rng = std::mt19937(0);

     return state;

@@ -3258,6 +3186,8 @@ void whisper_free_state(struct whisper_state * state)
         whisper_allocr_free(state->alloc_cross);
         whisper_allocr_free(state->alloc_decode);

+        ggml_backend_free(state->backend);
+
         delete state;
     }
 }

@@ -3267,10 +3197,9 @@ void whisper_free(struct whisper_context * ctx) {
         if (ctx->model.ctx) {
             ggml_free(ctx->model.ctx);
         }
-        if (ctx->model.data) {
-            ggml_backend_buffer_free(ctx->model.data->buffer_main);
-
-            delete ctx->model.data;
+        if (ctx->model.buffer) {
+            ggml_backend_buffer_free(ctx->model.buffer);
         }

         whisper_free_state(ctx->state);

@@ -5570,12 +5499,12 @@ WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads) {
         double tsum = 0.0;

         // heat-up
-        ggml_graph_compute_helper(work, gf, n_threads, nullptr, nullptr);
+        ggml_graph_compute_helper(gf, work, n_threads, nullptr, nullptr);

         for (int i = 0; i < n_max; ++i) {
             const int64_t t0 = ggml_time_us();

-            ggml_graph_compute_helper(work, gf, n_threads, nullptr, nullptr);
+            ggml_graph_compute_helper(gf, work, n_threads, nullptr, nullptr);

             const int64_t t1 = ggml_time_us();