#pragma once

#include "llama.h"

#include <memory>
#include <vector>

struct llama_ubatch;

struct llama_memory_params {
    // kv cache
    ggml_type type_k;
    ggml_type type_v;

    // use full-size SWA cache
    bool swa_full;
};
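
// example (illustrative sketch, not part of the original header): a caller
// might fill these params before constructing a concrete memory
// implementation, e.g. an f16 KV cache without a full-size SWA cache:
//
//     llama_memory_params mparams;
//     mparams.type_k   = GGML_TYPE_F16;
//     mparams.type_v   = GGML_TYPE_F16;
//     mparams.swa_full = false;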

// general concept of LLM memory
// the KV cache is a type of LLM memory, but there can be other types
class llama_memory_i {
public:
    virtual ~llama_memory_i() = default;

    virtual void clear() = 0;

    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
    virtual void seq_keep(llama_seq_id seq_id) = 0;
    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) = 0;
    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;

    virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0;
    virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;

    virtual bool get_can_edit() const = 0;
};
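
// usage sketch (illustrative, not part of the original header): editing
// sequence data through this interface. `mem` is a hypothetical pointer to
// some concrete implementation; treating negative p0/p1 as "from the start" /
// "to the end" is an assumption borrowed from the public llama.h sequence API
// and may not hold for every implementation:
//
//     void example_edit(llama_memory_i * mem) {
//         mem->seq_cp(0, 1, -1, -1);   // copy all of sequence 0 into sequence 1
//         mem->seq_rm(1, 32, -1);      // remove sequence 1 cells at positions [32, end)
//         mem->seq_add(1, 0, 32, -4);  // shift positions [0, 32) of sequence 1 by -4
//         mem->seq_keep(1);            // discard every sequence except sequence 1
//     }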

enum llama_memory_status {
    LLAMA_MEMORY_STATUS_SUCCESS = 0,
    LLAMA_MEMORY_STATUS_FAILED_PREPARE,
    LLAMA_MEMORY_STATUS_FAILED_COMPUTE,
};

// the interface for managing the memory state during batch processing
// this interface is implemented per memory type. see:
//   - llama_kv_cache_unified_state
//   - llama_kv_cache_unified_iswa_state
//   ...
//
// the only method that can mutate the memory and the memory state is llama_memory_state_i::apply()
//
// TODO: rename to llama_memory_context_i ?
class llama_memory_state_i {
public:
    virtual ~llama_memory_state_i() = default;

    // consume the current ubatch from the state and proceed to the next one
    // return false if we are done
    virtual bool next() = 0;

    // apply the memory state for the current ubatch to the memory object
    // return false on failure
    virtual bool apply() = 0;

    // TODO: this might get reworked in the future when refactoring llama_batch
    virtual std::vector<int64_t> & out_ids() = 0;

    // get the current ubatch
    virtual const llama_ubatch & get_ubatch() const = 0;

    // get the status of the memory state
    virtual llama_memory_status get_status() const = 0;
};

using llama_memory_state_ptr = std::unique_ptr<llama_memory_state_i>;
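
// usage sketch (illustrative, not part of the original header): the intended
// batch-processing loop implied by the comments above, assuming `state` was
// produced by a concrete memory implementation and is positioned on the first
// ubatch:
//
//     void example_process(llama_memory_state_i & state) {
//         if (state.get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
//             return; // preparing the state failed
//         }
//         do {
//             if (!state.apply()) {
//                 return; // applying the state for the current ubatch failed
//             }
//             const llama_ubatch & ubatch = state.get_ubatch();
//             // ... run compute on ubatch ...
//         } while (state.next());
//     }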