#pragma once

#include "llama.h"
#include "llama-batch.h"
#include "llama-cparams.h"
#include "llama-graph.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"
#include "ggml-opt.h"

#include <map>
#include <vector>

struct llama_model;
struct llama_kv_cache;

class llama_io_read_i;
class llama_io_write_i;
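
//
// llama_context holds the per-session inference state for a single llama_model:
// the backend scheduler and compute buffers, the memory/KV cache, the output buffers
// (logits and embeddings), LoRA adapters and performance counters.
// The public llama_* C API is implemented largely as thin wrappers around the methods below.
//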
struct llama_context {
    // init scheduler and compute buffers, reserve worst-case graphs
    llama_context(
            const llama_model & model,
            llama_context_params params);

    ~llama_context();

    void synchronize();

    const llama_model   & get_model()   const;
    const llama_cparams & get_cparams() const;

    ggml_backend_sched_t get_sched() const;

    ggml_context * get_ctx_compute() const;

    uint32_t n_ctx()         const;
    uint32_t n_ctx_per_seq() const;
    uint32_t n_batch()       const;
    uint32_t n_ubatch()      const;
    uint32_t n_seq_max()     const;

    uint32_t n_threads()       const;
    uint32_t n_threads_batch() const;

          llama_kv_cache * get_kv_self();
    const llama_kv_cache * get_kv_self() const;
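
    // apply any pending updates to the KV cache (e.g. K-shift, defragmentation)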
    void kv_self_update();

    enum llama_pooling_type pooling_type() const;
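
    // access the outputs of the most recent encode()/decode();
    // the *_ith() variants take a token position in the last batch and map it through output_ids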
    float * get_logits();
    float * get_logits_ith(int32_t i);

    float * get_embeddings();
    float * get_embeddings_ith(int32_t i);
    float * get_embeddings_seq(llama_seq_id seq_id);

    void attach_threadpool(
            ggml_threadpool_t threadpool,
            ggml_threadpool_t threadpool_batch);

    void detach_threadpool();

    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);

    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);

    void set_embeddings (bool value);
    void set_causal_attn(bool value);
    void set_warmup(bool value);

    void set_adapter_lora(
            llama_adapter_lora * adapter,
            float scale);

    bool rm_adapter_lora(
            llama_adapter_lora * adapter);

    void clear_adapter_lora();

    bool apply_adapter_cvec(
            const float * data,
                 size_t   len,
                int32_t   n_embd,
                int32_t   il_start,
                int32_t   il_end);
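
    // run the model on a prepared llama_batch:
    //   encode() - encoder pass (e.g. the encoder of encoder-decoder models)
    //   decode() - regular decoder/causal evaluation
    // return values follow the public llama_encode/llama_decode convention:
    //   0 = success, < 0 = error; decode() may also return a positive value when the
    //   batch could not be processed (e.g. no free slot in the KV cache)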
    int encode(llama_batch & inp_batch);
    int decode(llama_batch & inp_batch);

    //
    // state save/load
    //
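
    // serialize/restore the context state (outputs, memory/KV cache, ...):
    //   state_*     - whole context, to/from a caller-provided buffer
    //   state_seq_* - a single sequence only
    //   *_file      - same data wrapped in a session file together with the prompt tokens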
    size_t state_get_size();
    size_t state_get_data(      uint8_t * dst, size_t size);
    size_t state_set_data(const uint8_t * src, size_t size);

    size_t state_seq_get_size(llama_seq_id seq_id);
    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size);
    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size);

    bool state_load_file(
            const char * filepath,
           llama_token * tokens_out,
                size_t   n_token_capacity,
                size_t * n_token_count_out);

    bool state_save_file(
            const char * filepath,
     const llama_token * tokens,
                size_t   n_token_count);

    size_t state_seq_load_file(
          llama_seq_id   seq_id,
            const char * filepath,
           llama_token * tokens_out,
                size_t   n_token_capacity,
                size_t * n_token_count_out);

    size_t state_seq_save_file(
          llama_seq_id   seq_id,
            const char * filepath,
     const llama_token * tokens,
                size_t   n_token_count);

    //
    // perf
    //

    llama_perf_context_data perf_get_data() const;
    void perf_reset();

    //
    // training
    //
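
    // experimental training support built on top of ggml-opt:
    //   opt_init()  - set up an optimization context for the given model
    //   opt_epoch() - run one epoch over a ggml-opt dataset, split at idata_split into a
    //                 training part (result_train) and an evaluation part (result_eval)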
    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);

    void opt_epoch(
            ggml_opt_dataset_t      dataset,
            ggml_opt_result_t       result_train,
            ggml_opt_result_t       result_eval,
            int64_t                 idata_split,
            ggml_opt_epoch_callback callback_train,
            ggml_opt_epoch_callback callback_eval);

    void opt_epoch_iter(
            ggml_opt_dataset_t               dataset,
            ggml_opt_result_t                result,
            const std::vector<llama_token> & tokens,
            const std::vector<llama_token> & labels_sparse,
            llama_batch                    & batch,
            ggml_opt_epoch_callback          callback,
            bool                             train,
            int64_t                          idata_in_loop,
            int64_t                          ndata_in_loop,
            int64_t                          t_loop_start);

private:
    //
    // output
    //

    // Make sure enough space is available for outputs.
    // Returns max number of outputs for which space was reserved.
    int32_t output_reserve(int32_t n_outputs);

    //
    // graph
    //

public:
    int32_t graph_max_nodes() const;

    // zero-out inputs and create the ctx_compute for the compute graph
    ggml_cgraph * graph_init();

    // returns the result of ggml_backend_sched_graph_compute_async execution
    ggml_status graph_compute(
            ggml_cgraph * gf,
            bool batched);

private:
    llm_graph_result_ptr graph_build(
            ggml_context * ctx,
            ggml_cgraph * gf,
            const llama_ubatch & ubatch,
            llm_graph_type gtype);

    llm_graph_cb graph_get_cb() const;

    // TODO: read/write lora adapters and cvec
    size_t state_write_data(llama_io_write_i & io);
    size_t state_read_data (llama_io_read_i  & io);

    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id);
    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id);

    //
    // members
    //

    const llama_model & model;

    llama_cparams       cparams;
    llama_adapter_cvec  cvec;
    llama_adapter_loras loras;

    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably
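
    // memory module (KV cache state), abstracted behind the llama_memory_i interface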
    std::unique_ptr<llama_memory_i> memory;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    size_t  logits_size = 0; // capacity (of floats) for logits
    float * logits      = nullptr;

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    size_t  embd_size = 0; // capacity (of floats) for embeddings
    float * embd      = nullptr;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    int32_t n_outputs     = 0; // number of actually-used outputs in the current ubatch or last logical batch
    int32_t n_outputs_max = 0; // capacity (of token positions) for the output buffers

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers

    ggml_backend_sched_ptr sched;

    ggml_backend_t backend_cpu = nullptr;
    std::vector<ggml_backend_ptr> backends;

    ggml_context_ptr ctx_compute;

    // training
    ggml_opt_context_t opt_ctx = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;

    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    // buffer types used for the compute buffer of each backend
    std::vector<ggml_backend_t>             backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;

    // memory buffers used to evaluate the model
    std::vector<uint8_t> buf_compute_meta;

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    bool has_evaluated_once = false;

    // perf
    mutable int64_t t_start_us  = 0;
    mutable int64_t t_load_us   = 0;
    mutable int64_t t_p_eval_us = 0;
    mutable int64_t t_eval_us   = 0;

    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls
};