Mirror of https://github.com/ggerganov/whisper.cpp.git (synced 2025-06-14 21:08:07 +00:00)
@@ -12,9 +12,12 @@ if (WHISPER_SDL2)
     llama-context.cpp
     llama-cparams.cpp
     llama-grammar.cpp
+    llama-graph.cpp
     llama-hparams.cpp
     llama-impl.cpp
+    llama-io.cpp
     llama-kv-cache.cpp
+    llama-memory.cpp
     llama-mmap.cpp
     llama-model-loader.cpp
     llama-model.cpp
@@ -4,14 +4,13 @@
 #include "llama-mmap.h"
 #include "llama-model.h"
 
-#include <algorithm>
 #include <map>
 #include <cassert>
 #include <stdexcept>
 
 // vec
 
-struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const {
+ggml_tensor * llama_adapter_cvec::tensor_for(int il) const {
     if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
         return nullptr;
     }
@@ -19,7 +18,7 @@ struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const {
     return tensors[il];
 }
 
-struct ggml_tensor * llama_adapter_cvec::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
+ggml_tensor * llama_adapter_cvec::apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const {
     ggml_tensor * layer_dir = tensor_for(il);
     if (layer_dir != nullptr) {
         cur = ggml_add(ctx, cur, layer_dir);
@@ -40,7 +39,7 @@ bool llama_adapter_cvec::init(const llama_model & model) {
     auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
         auto it = ctx_map.find(buft);
         if (it == ctx_map.end()) {
-            struct ggml_init_params params = {
+            ggml_init_params params = {
                 /*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(),
                 /*.mem_buffer =*/ NULL,
                 /*.no_alloc =*/ true,
@@ -91,7 +90,7 @@ bool llama_adapter_cvec::init(const llama_model & model) {
     return true;
 }
 
-int32_t llama_adapter_cvec::apply(
+bool llama_adapter_cvec::apply(
         const llama_model & model,
         const float * data,
         size_t len,
@@ -104,17 +103,17 @@ int32_t llama_adapter_cvec::apply(
         // disable the current control vector (but leave allocated for later)
         layer_start = -1;
         layer_end = -1;
-        return 0;
+        return true;
     }
 
     if (n_embd != (int) hparams.n_embd) {
         LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
-        return 1;
+        return false;
     }
 
     if (tensors.empty()) {
         if (!init(model)) {
-            return 1;
+            return false;
         }
     }
 
@@ -130,12 +129,12 @@ int32_t llama_adapter_cvec::apply(
         }
     }
 
-    return 0;
+    return true;
 }
 
 // lora
 
-llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor * w) {
+llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
     const std::string name(w->name);
 
     const auto pos = ab_map.find(name);
@@ -146,11 +145,11 @@ llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor *
     return nullptr;
 }
 
-static void llama_adapter_lora_init_impl(struct llama_model & model, const char * path_lora, struct llama_adapter_lora & adapter) {
+static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
     LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
 
     ggml_context * ctx_init;
-    struct gguf_init_params meta_gguf_params = {
+    gguf_init_params meta_gguf_params = {
         /* .no_alloc = */ true,
         /* .ctx = */ &ctx_init,
     };
@@ -201,7 +200,7 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
         auto it = ctx_map.find(buft);
         if (it == ctx_map.end()) {
             // add a new context
-            struct ggml_init_params params = {
+            ggml_init_params params = {
                 /*.mem_size =*/ n_tensors*ggml_tensor_overhead(),
                 /*.mem_buffer =*/ NULL,
                 /*.no_alloc =*/ true,
@@ -248,6 +247,26 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
         }
     }
 
+    // get extra buffer types of the CPU
+    // TODO: a more general solution for non-CPU extra buft should be imlpemented in the future
+    // ref: https://github.com/ggml-org/llama.cpp/pull/12593#pullrequestreview-2718659948
+    std::vector<ggml_backend_buffer_type_t> buft_extra;
+    {
+        auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+        auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
+
+        auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
+            ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
+
+        if (ggml_backend_dev_get_extra_bufts_fn) {
+            ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
+            while (extra_bufts && *extra_bufts) {
+                buft_extra.emplace_back(*extra_bufts);
+                ++extra_bufts;
+            }
+        }
+    }
+
     // add tensors
     for (auto & it : ab_map) {
         const std::string & name = it.first;
@@ -264,7 +283,23 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
             throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)");
         }
 
-        struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
+        auto * buft = ggml_backend_buffer_get_type(model_tensor->buffer);
+
+        // do not load loras to extra buffer types (i.e. bufts for repacking) -> use the CPU in that case
+        for (auto & ex : buft_extra) {
+            if (ex == buft) {
+                LLAMA_LOG_WARN("%s: lora for '%s' cannot use buft '%s', fallback to CPU\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
+
+                auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+                buft = ggml_backend_dev_buffer_type(cpu_dev);
+
+                break;
+            }
+        }
+
+        LLAMA_LOG_DEBUG("%s: lora for '%s' -> '%s'\n", __func__, model_tensor->name, ggml_backend_buft_name(buft));
+
+        ggml_context * dev_ctx = ctx_for_buft(buft);
         // validate tensor shape
         if (is_token_embd) {
             // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd()
@@ -281,8 +316,8 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
         }
 
         // save tensor to adapter
-        struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
-        struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
+        ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
+        ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
         ggml_set_name(tensor_a, w.a->name);
         ggml_set_name(tensor_b, w.b->name);
         adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b);
@@ -308,7 +343,7 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
     {
         llama_file gguf_file(path_lora, "rb");
         std::vector<uint8_t> read_buf;
-        auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) {
+        auto set_tensor = [&](ggml_tensor * orig, ggml_tensor * dev) {
            size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
            size_t size = ggml_nbytes(orig);
            read_buf.resize(size);
@@ -327,8 +362,8 @@ static void llama_adapter_lora_init_impl(struct llama_model & model, const char
     LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
 }
 
-struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model, const char * path_lora) {
-    struct llama_adapter_lora * adapter = new llama_adapter_lora();
+llama_adapter_lora * llama_adapter_lora_init(llama_model * model, const char * path_lora) {
+    llama_adapter_lora * adapter = new llama_adapter_lora();
 
     try {
         llama_adapter_lora_init_impl(*model, path_lora, *adapter);
@@ -342,6 +377,6 @@ struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model,
         return nullptr;
     }
 
-void llama_adapter_lora_free(struct llama_adapter_lora * adapter) {
+void llama_adapter_lora_free(llama_adapter_lora * adapter) {
     delete adapter;
 }
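As context for the signature cleanups above, here is a minimal usage sketch of the two public entry points touched by this diff, llama_adapter_lora_init and llama_adapter_lora_free. The model pointer and the adapter path are assumptions supplied by the caller; failure is reported by a null return, as in the catch block above.

    // hypothetical caller: model is an already-loaded llama_model *
    llama_adapter_lora * adapter = llama_adapter_lora_init(model, "lora-adapter.gguf");
    if (adapter == nullptr) {
        // loading failed (e.g. a LoRA tensor does not exist in the base model)
        return;
    }
    // ... attach the adapter to a context and run inference ...
    llama_adapter_lora_free(adapter);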
@@ -15,11 +15,11 @@
 //
 
 struct llama_adapter_cvec {
-    struct ggml_tensor * tensor_for(int il) const;
+    ggml_tensor * tensor_for(int il) const;
 
-    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const;
+    ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const;
 
-    int32_t apply(
+    bool apply(
             const llama_model & model,
             const float * data,
             size_t len,
@@ -36,7 +36,7 @@ private:
     std::vector<ggml_context_ptr> ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;
 
-    std::vector<struct ggml_tensor *> tensors; // per layer
+    std::vector<ggml_tensor *> tensors; // per layer
 };
 
 //
@@ -44,8 +44,8 @@ private:
 //
 
 struct llama_adapter_lora_weight {
-    struct ggml_tensor * a = nullptr;
-    struct ggml_tensor * b = nullptr;
+    ggml_tensor * a = nullptr;
+    ggml_tensor * b = nullptr;
 
     // get actual scale based on rank and alpha
     float get_scale(float alpha, float adapter_scale) const {
@@ -55,12 +55,12 @@ struct llama_adapter_lora_weight {
     }
 
     llama_adapter_lora_weight() = default;
-    llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
+    llama_adapter_lora_weight(ggml_tensor * a, ggml_tensor * b) : a(a), b(b) {}
 };
 
 struct llama_adapter_lora {
     // map tensor name to lora_a_b
-    std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;
+    std::unordered_map<std::string, llama_adapter_lora_weight> ab_map;
 
     std::vector<ggml_context_ptr> ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;
@@ -70,5 +70,7 @@ struct llama_adapter_lora {
     llama_adapter_lora() = default;
     ~llama_adapter_lora() = default;
 
-    llama_adapter_lora_weight * get_weight(struct ggml_tensor * w);
+    llama_adapter_lora_weight * get_weight(ggml_tensor * w);
 };
+
+using llama_adapter_loras = std::unordered_map<llama_adapter_lora *, float>;
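The new llama_adapter_loras alias maps an adapter to its blend scale. A small sketch of how a caller might populate and walk it; the adapter pointers (adapter_a, adapter_b) and the scale values are illustrative assumptions, not part of this diff.

    llama_adapter_loras loras;   // std::unordered_map<llama_adapter_lora *, float>
    loras[adapter_a] = 1.0f;     // full strength
    loras[adapter_b] = 0.5f;     // half strength
    for (const auto & [adapter, scale] : loras) {
        // apply each adapter with its associated scale ...
    }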
@@ -6,6 +6,7 @@
 
 static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_LLAMA, "llama" },
+    { LLM_ARCH_LLAMA4, "llama4" },
     { LLM_ARCH_DECI, "deci" },
     { LLM_ARCH_FALCON, "falcon" },
     { LLM_ARCH_GROK, "grok" },
@@ -25,6 +26,8 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_QWEN2, "qwen2" },
     { LLM_ARCH_QWEN2MOE, "qwen2moe" },
     { LLM_ARCH_QWEN2VL, "qwen2vl" },
+    { LLM_ARCH_QWEN3, "qwen3" },
+    { LLM_ARCH_QWEN3MOE, "qwen3moe" },
     { LLM_ARCH_PHI2, "phi2" },
     { LLM_ARCH_PHI3, "phi3" },
     { LLM_ARCH_PHIMOE, "phimoe" },
@@ -36,6 +39,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_MINICPM3, "minicpm3" },
     { LLM_ARCH_GEMMA, "gemma" },
     { LLM_ARCH_GEMMA2, "gemma2" },
+    { LLM_ARCH_GEMMA3, "gemma3" },
     { LLM_ARCH_STARCODER2, "starcoder2" },
     { LLM_ARCH_MAMBA, "mamba" },
     { LLM_ARCH_XVERSE, "xverse" },
@@ -50,6 +54,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_DEEPSEEK, "deepseek" },
     { LLM_ARCH_DEEPSEEK2, "deepseek2" },
     { LLM_ARCH_CHATGLM, "chatglm" },
+    { LLM_ARCH_GLM4, "glm4" },
     { LLM_ARCH_BITNET, "bitnet" },
     { LLM_ARCH_T5, "t5" },
     { LLM_ARCH_T5ENCODER, "t5encoder" },
@@ -58,10 +63,14 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_EXAONE, "exaone" },
     { LLM_ARCH_RWKV6, "rwkv6" },
     { LLM_ARCH_RWKV6QWEN2, "rwkv6qwen2" },
+    { LLM_ARCH_RWKV7, "rwkv7" },
+    { LLM_ARCH_ARWKV7, "arwkv7" },
     { LLM_ARCH_GRANITE, "granite" },
     { LLM_ARCH_GRANITE_MOE, "granitemoe" },
     { LLM_ARCH_CHAMELEON, "chameleon" },
     { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
+    { LLM_ARCH_PLM, "plm" },
+    { LLM_ARCH_BAILINGMOE, "bailingmoe" },
     { LLM_ARCH_UNKNOWN, "(unknown)" },
 };
 
@@ -70,6 +79,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
     { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
     { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
+    { LLM_KV_GENERAL_FILE_TYPE, "general.file_type" },
     { LLM_KV_GENERAL_NAME, "general.name" },
     { LLM_KV_GENERAL_AUTHOR, "general.author" },
     { LLM_KV_GENERAL_VERSION, "general.version" },
@@ -108,6 +118,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_RESIDUAL_SCALE, "%s.residual_scale" },
     { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" },
     { LLM_KV_TOKEN_SHIFT_COUNT, "%s.token_shift_count" },
+    { LLM_KV_INTERLEAVE_MOE_LAYER_STEP, "%s.interleave_moe_layer_step" },
 
     { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
     { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
@@ -122,9 +133,15 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
     { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" },
     { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" },
+    { LLM_KV_ATTENTION_DECAY_LORA_RANK, "%s.attention.decay_lora_rank" },
+    { LLM_KV_ATTENTION_ICLR_LORA_RANK, "%s.attention.iclr_lora_rank" },
+    { LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, "%s.attention.value_residual_mix_lora_rank" },
+    { LLM_KV_ATTENTION_GATE_LORA_RANK, "%s.attention.gate_lora_rank" },
     { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
     { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" },
     { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" },
+    { LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" },
+    { LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" },
 
     { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
     { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" },
@@ -223,6 +240,35 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
         },
     },
+    {
+        LLM_ARCH_LLAMA4,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+            { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+            { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
+            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+            { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
+            { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
+            { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
+        },
+    },
     {
         LLM_ARCH_DECI,
         {
@@ -554,6 +600,45 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
         },
     },
+    {
+        LLM_ARCH_QWEN3,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+        },
+    },
+    {
+        LLM_ARCH_QWEN3MOE,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+        },
+    },
     {
         LLM_ARCH_PHI2,
         {
@@ -766,6 +851,27 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
         },
     },
+    {
+        LLM_ARCH_GEMMA3,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
+        },
+    },
     {
         LLM_ARCH_STARCODER2,
         {
@@ -999,6 +1105,8 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
             { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
             { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
+            { LLM_TENSOR_ATTN_K_B, "blk.%d.attn_k_b" },
+            { LLM_TENSOR_ATTN_V_B, "blk.%d.attn_v_b" },
             { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
             { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
@@ -1015,6 +1123,22 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
         },
     },
+    {
+        LLM_ARCH_PLM,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
+            { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
+            { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_CHATGLM,
         {
@@ -1033,6 +1157,25 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
         },
     },
+    {
+        LLM_ARCH_GLM4,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
+            { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
+        },
+    },
     {
         LLM_ARCH_BITNET,
         {
@@ -1217,6 +1360,74 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_RWKV7,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
+            { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
+            { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
+            { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
+            { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
+            { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
+            { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
+            { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
+            { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
+            { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
+            { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
+            { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
+            { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
+            { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
+            { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
+            { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
+            { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
+            { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
+            { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
+            { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
+            { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
+            { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
+            { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
+            { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
+        },
+    },
+    {
+        LLM_ARCH_ARWKV7,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
+            { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
+            { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
+            { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
+            { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
+            { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
+            { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
+            { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
+            { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
+            { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
+            { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
+            { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
+            { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
+            { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
+            { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
+            { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
+            { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
+            { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
+            { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
+            { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_GRANITE,
         {
@@ -1296,6 +1507,29 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" },
         },
     },
+    {
+        LLM_ARCH_BAILINGMOE,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
+            { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
+            { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
+            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
+            { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
+            { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
+            { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {
@@ -1333,23 +1567,8 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_QKV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_K_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_ATTN_V_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_DEC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
@@ -1376,6 +1595,12 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_SSM_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_TIME_MIX_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_TIME_MIX_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_TIME_MIX_A1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_TIME_MIX_A2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_TIME_MIX_V1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_TIME_MIX_V2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_TIME_MIX_G1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
+    {LLM_TENSOR_TIME_MIX_G2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_TIME_MIX_DECAY_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_TIME_MIX_DECAY_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
     {LLM_TENSOR_TIME_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
@@ -1394,6 +1619,9 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_TIME_MIX_LN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_CHANNEL_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_CHANNEL_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+    {LLM_TENSOR_TIME_MIX_K_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+    {LLM_TENSOR_TIME_MIX_K_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+    {LLM_TENSOR_TIME_MIX_R_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_TIME_MIX_LERP_W, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     {LLM_TENSOR_TIME_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     {LLM_TENSOR_TIME_MIX_LERP_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
@@ -1401,6 +1629,9 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_TIME_MIX_LERP_G, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     {LLM_TENSOR_TIME_MIX_LERP_FUSED, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     {LLM_TENSOR_TIME_MIX_DECAY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+    {LLM_TENSOR_TIME_MIX_W0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+    {LLM_TENSOR_TIME_MIX_A0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
+    {LLM_TENSOR_TIME_MIX_V0, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     {LLM_TENSOR_TIME_MIX_FIRST, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
     {LLM_TENSOR_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_ATTN_NORM_2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
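The tensor-name tables above are printf-style patterns keyed by layer index. As a rough illustration of how such a pattern expands (llama.cpp resolves these through its own name-formatting helpers, which are not part of this diff):

    #include <cstdio>
    #include <string>

    // expand a per-layer pattern such as "blk.%d.attn_q_norm" for layer 11
    std::string llm_tensor_name(const char * pattern, int il) {
        char buf[128];
        std::snprintf(buf, sizeof(buf), pattern, il);
        return buf; // e.g. "blk.11.attn_q_norm"
    }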
@@ -10,6 +10,7 @@
 
 enum llm_arch {
     LLM_ARCH_LLAMA,
+    LLM_ARCH_LLAMA4,
     LLM_ARCH_DECI,
     LLM_ARCH_FALCON,
     LLM_ARCH_BAICHUAN,
@@ -29,6 +30,8 @@ enum llm_arch {
     LLM_ARCH_QWEN2,
     LLM_ARCH_QWEN2MOE,
     LLM_ARCH_QWEN2VL,
+    LLM_ARCH_QWEN3,
+    LLM_ARCH_QWEN3MOE,
     LLM_ARCH_PHI2,
     LLM_ARCH_PHI3,
     LLM_ARCH_PHIMOE,
@@ -40,6 +43,7 @@ enum llm_arch {
     LLM_ARCH_MINICPM3,
     LLM_ARCH_GEMMA,
     LLM_ARCH_GEMMA2,
+    LLM_ARCH_GEMMA3,
     LLM_ARCH_STARCODER2,
     LLM_ARCH_MAMBA,
     LLM_ARCH_XVERSE,
@@ -54,6 +58,7 @@ enum llm_arch {
     LLM_ARCH_DEEPSEEK,
     LLM_ARCH_DEEPSEEK2,
     LLM_ARCH_CHATGLM,
+    LLM_ARCH_GLM4,
     LLM_ARCH_BITNET,
     LLM_ARCH_T5,
     LLM_ARCH_T5ENCODER,
@@ -62,10 +67,14 @@ enum llm_arch {
     LLM_ARCH_EXAONE,
     LLM_ARCH_RWKV6,
     LLM_ARCH_RWKV6QWEN2,
+    LLM_ARCH_RWKV7,
+    LLM_ARCH_ARWKV7,
     LLM_ARCH_GRANITE,
     LLM_ARCH_GRANITE_MOE,
     LLM_ARCH_CHAMELEON,
     LLM_ARCH_WAVTOKENIZER_DEC,
+    LLM_ARCH_PLM,
+    LLM_ARCH_BAILINGMOE,
     LLM_ARCH_UNKNOWN,
 };
 
@@ -74,6 +83,7 @@ enum llm_kv {
     LLM_KV_GENERAL_ARCHITECTURE,
     LLM_KV_GENERAL_QUANTIZATION_VERSION,
     LLM_KV_GENERAL_ALIGNMENT,
+    LLM_KV_GENERAL_FILE_TYPE,
     LLM_KV_GENERAL_NAME,
     LLM_KV_GENERAL_AUTHOR,
     LLM_KV_GENERAL_VERSION,
@@ -112,6 +122,7 @@ enum llm_kv {
     LLM_KV_RESIDUAL_SCALE,
     LLM_KV_EMBEDDING_SCALE,
     LLM_KV_TOKEN_SHIFT_COUNT,
+    LLM_KV_INTERLEAVE_MOE_LAYER_STEP,
 
     LLM_KV_ATTENTION_HEAD_COUNT,
     LLM_KV_ATTENTION_HEAD_COUNT_KV,
@@ -126,9 +137,15 @@ enum llm_kv {
     LLM_KV_ATTENTION_CAUSAL,
     LLM_KV_ATTENTION_Q_LORA_RANK,
     LLM_KV_ATTENTION_KV_LORA_RANK,
+    LLM_KV_ATTENTION_DECAY_LORA_RANK,
+    LLM_KV_ATTENTION_ICLR_LORA_RANK,
+    LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK,
+    LLM_KV_ATTENTION_GATE_LORA_RANK,
     LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
     LLM_KV_ATTENTION_SLIDING_WINDOW,
     LLM_KV_ATTENTION_SCALE,
+    LLM_KV_ATTENTION_KEY_LENGTH_MLA,
+    LLM_KV_ATTENTION_VALUE_LENGTH_MLA,
 
     LLM_KV_ROPE_DIMENSION_COUNT,
     LLM_KV_ROPE_DIMENSION_SECTIONS,
@@ -242,6 +259,8 @@ enum llm_tensor {
     LLM_TENSOR_ATTN_Q_NORM,
     LLM_TENSOR_ATTN_K_NORM,
     LLM_TENSOR_LAYER_OUT_NORM,
+    LLM_TENSOR_POST_ATTN_NORM,
+    LLM_TENSOR_POST_MLP_NORM,
     LLM_TENSOR_SSM_IN,
     LLM_TENSOR_SSM_CONV1D,
     LLM_TENSOR_SSM_X,
@@ -249,8 +268,20 @@ enum llm_tensor {
     LLM_TENSOR_SSM_A,
     LLM_TENSOR_SSM_D,
     LLM_TENSOR_SSM_OUT,
+    LLM_TENSOR_TIME_MIX_W0,
     LLM_TENSOR_TIME_MIX_W1,
     LLM_TENSOR_TIME_MIX_W2,
+    LLM_TENSOR_TIME_MIX_A0,
+    LLM_TENSOR_TIME_MIX_A1,
+    LLM_TENSOR_TIME_MIX_A2,
+    LLM_TENSOR_TIME_MIX_V0,
+    LLM_TENSOR_TIME_MIX_V1,
+    LLM_TENSOR_TIME_MIX_V2,
+    LLM_TENSOR_TIME_MIX_G1,
+    LLM_TENSOR_TIME_MIX_G2,
+    LLM_TENSOR_TIME_MIX_K_K,
+    LLM_TENSOR_TIME_MIX_K_A,
+    LLM_TENSOR_TIME_MIX_R_K,
     LLM_TENSOR_TIME_MIX_LERP_X,
     LLM_TENSOR_TIME_MIX_LERP_W,
     LLM_TENSOR_TIME_MIX_LERP_K,
@@ -277,6 +308,8 @@ enum llm_tensor {
     LLM_TENSOR_ATTN_Q_B,
     LLM_TENSOR_ATTN_KV_A_MQA,
     LLM_TENSOR_ATTN_KV_B,
+    LLM_TENSOR_ATTN_K_B,
+    LLM_TENSOR_ATTN_V_B,
     LLM_TENSOR_ATTN_Q_A_NORM,
     LLM_TENSOR_ATTN_KV_A_NORM,
     LLM_TENSOR_ATTN_SUB_NORM,
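The llm_kv entries added here pair with the "%s."-prefixed strings in llama-arch.cpp, where the placeholder is the architecture name. For example, the new MLA keys would appear in a DeepSeek2 GGUF roughly as sketched below; the snippet is illustrative only, since the library resolves these keys through its own LLM_KV helper rather than raw snprintf calls.

    #include <cstdio>

    char key[256];
    std::snprintf(key, sizeof(key), "%s.attention.key_length_mla", "deepseek2");
    // key == "deepseek2.attention.key_length_mla"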
@@ -42,9 +42,9 @@ struct llama_sbatch {
     bool logits_all; // TODO: remove once lctx.logits_all is removed too
 
     // sorted indices into the batch
-    std::vector<size_t> ids;
+    std::vector<int64_t> ids;
     // batch indices of the output
-    std::vector<size_t> out_ids;
+    std::vector<int64_t> out_ids;
     std::vector<llama_sbatch_seq> seq;
 
     const llama_batch * batch = nullptr;
@@ -4,6 +4,7 @@
 
 #include <map>
 #include <sstream>
+#include <algorithm>
 
 #if __cplusplus >= 202000L
     #define LU8(x) (const char*)(u8##x)
@@ -58,6 +59,10 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "granite", LLM_CHAT_TEMPLATE_GRANITE },
     { "gigachat", LLM_CHAT_TEMPLATE_GIGACHAT },
     { "megrez", LLM_CHAT_TEMPLATE_MEGREZ },
+    { "yandex", LLM_CHAT_TEMPLATE_YANDEX },
+    { "bailing", LLM_CHAT_TEMPLATE_BAILING },
+    { "llama4", LLM_CHAT_TEMPLATE_LLAMA4 },
+    { "smolvlm", LLM_CHAT_TEMPLATE_SMOLVLM },
 };
 
 llm_chat_template llm_chat_template_from_str(const std::string & name) {
@@ -77,6 +82,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
     if (tmpl_contains("<|im_start|>")) {
         return tmpl_contains("<|im_sep|>")
             ? LLM_CHAT_TEMPLATE_PHI_4
+            : tmpl_contains("<end_of_utterance>")
+                ? LLM_CHAT_TEMPLATE_SMOLVLM // SmolVLM uses <|im_start|> as BOS, but it is NOT chatml
             : LLM_CHAT_TEMPLATE_CHATML;
     } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) {
         if (tmpl_contains("[SYSTEM_PROMPT]")) {
@@ -117,6 +124,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_PHI_3;
     } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
         return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
+    } else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) {
+        return LLM_CHAT_TEMPLATE_GLMEDGE;
     } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
         return LLM_CHAT_TEMPLATE_ZEPHYR;
     } else if (tmpl_contains("bos_token + message['role']")) {
@@ -167,6 +176,12 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_GIGACHAT;
     } else if (tmpl_contains("<|role_start|>")) {
         return LLM_CHAT_TEMPLATE_MEGREZ;
+    } else if (tmpl_contains(" Ассистент:")) {
+        return LLM_CHAT_TEMPLATE_YANDEX;
+    } else if (tmpl_contains("<role>ASSISTANT</role>") && tmpl_contains("'HUMAN'")) {
+        return LLM_CHAT_TEMPLATE_BAILING;
+    } else if (tmpl_contains("<|header_start|>") && tmpl_contains("<|header_end|>")) {
+        return LLM_CHAT_TEMPLATE_LLAMA4;
     }
     return LLM_CHAT_TEMPLATE_UNKNOWN;
 }
@@ -566,6 +581,66 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "<|role_start|>assistant<|role_end|>";
         }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_YANDEX) {
+        // Yandex template ("\n\n" is defined as EOT token)
+
+        ss << "<s>";
+
+        for (size_t i = 0; i < chat.size(); i++) {
+            std::string role(chat[i]->role);
+            if (role == "user") {
+                ss << " Пользователь: " << chat[i]->content << "\n\n";
+            } else if (role == "assistant") {
+                ss << " Ассистент: " << chat[i]->content << "\n\n";
+            }
+        }
+
+        // Add generation prompt if needed
+        if (add_ass) {
+            ss << " Ассистент:[SEP]";
+        }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_BAILING) {
+        // Bailing (Ling) template
+        for (auto message : chat) {
+            std::string role(message->role);
+
+            if (role == "user") {
+                role = "HUMAN";
+            } else {
+                std::transform(role.begin(), role.end(), role.begin(), ::toupper);
+            }
+
+            ss << "<role>" << role << "</role>" << message->content;
+        }
+
+        if (add_ass) {
+            ss << "<role>ASSISTANT</role>";
+        }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA4) {
+        // Llama 4
+        for (auto message : chat) {
+            std::string role(message->role);
+            ss << "<|header_start|>" << role << "<|header_end|>\n\n" << trim(message->content) << "<|eot|>";
+        }
+        if (add_ass) {
+            ss << "<|header_start|>assistant<|header_end|>\n\n";
+        }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_SMOLVLM) {
+        // SmolVLM
+        ss << "<|im_start|>"; // uses <|im_start|> as BOS, but the actual content is NOT chatml
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                ss << message->content << "\n\n";
+            } else if (role == "user") {
+                ss << "User: " << message->content << "<end_of_utterance>\n";
+            } else {
+                ss << "Assistant: " << message->content << "<end_of_utterance>\n";
+            }
+        }
+        if (add_ass) {
+            ss << "Assistant:";
+        }
     } else {
         // template not supported
         return -1;
@@ -584,4 +659,3 @@ int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
     }
     return (int32_t) LLM_CHAT_TEMPLATES.size();
 }
-
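For reference, the Llama 4 branch added above renders a single user turn as the string sketched below; this is derived directly from the stream operations in the new code, assuming add_ass is true and the message content is "Hello".

    // expected rendering of {role: "user", content: "Hello"} under LLM_CHAT_TEMPLATE_LLAMA4
    const char * expected =
        "<|header_start|>user<|header_end|>\n\n"
        "Hello<|eot|>"
        "<|header_start|>assistant<|header_end|>\n\n";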
@@ -38,6 +38,10 @@ enum llm_chat_template {
     LLM_CHAT_TEMPLATE_GRANITE,
     LLM_CHAT_TEMPLATE_GIGACHAT,
     LLM_CHAT_TEMPLATE_MEGREZ,
+    LLM_CHAT_TEMPLATE_YANDEX,
+    LLM_CHAT_TEMPLATE_BAILING,
+    LLM_CHAT_TEMPLATE_LLAMA4,
+    LLM_CHAT_TEMPLATE_SMOLVLM,
     LLM_CHAT_TEMPLATE_UNKNOWN,
 };
 
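The four new enumerators line up with the name strings registered in LLM_CHAT_TEMPLATES, so string-based template selection should now resolve them as well. A small sketch, assuming llm_chat_template_from_str performs a direct lookup in that map:

    llm_chat_template t1 = llm_chat_template_from_str("llama4");  // LLM_CHAT_TEMPLATE_LLAMA4
    llm_chat_template t2 = llm_chat_template_from_str("smolvlm"); // LLM_CHAT_TEMPLATE_SMOLVLM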
File diff suppressed because it is too large.
@@ -3,66 +3,213 @@
 #include "llama.h"
 #include "llama-batch.h"
 #include "llama-cparams.h"
-#include "llama-model.h"
-#include "llama-kv-cache.h"
+#include "llama-graph.h"
 #include "llama-adapter.h"
 
 #include "ggml-cpp.h"
 
 #include <map>
-#include <unordered_map>
 #include <vector>
-#include <set>
+
+struct llama_model;
+struct llama_kv_cache;
+
+class llama_io_read_i;
+class llama_io_write_i;
 
 struct llama_context {
-    llama_context(const llama_model & model)
-        : model(model)
-        , t_start_us(model.t_start_us)
-        , t_load_us(model.t_load_us) {}
-
-    const struct llama_model & model;
-
-    struct llama_cparams      cparams;
-    struct llama_sbatch       sbatch;  // TODO: revisit if needed
-    struct llama_kv_cache     kv_self;
-    struct llama_adapter_cvec cvec;
-
-    std::unordered_map<struct llama_adapter_lora *, float> lora;
-
-    std::vector<ggml_backend_ptr> backends;
-    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;
-
-    ggml_backend_t backend_cpu = nullptr;
-
-    ggml_threadpool_t threadpool       = nullptr;
-    ggml_threadpool_t threadpool_batch = nullptr;
-
-    bool has_evaluated_once = false;
-
-    mutable int64_t t_start_us;
-    mutable int64_t t_load_us;
-    mutable int64_t t_p_eval_us = 0;
-    mutable int64_t t_eval_us = 0;
-
-    mutable int64_t t_compute_start_us = 0;
-    mutable int64_t n_queued_tokens = 0;
-
-    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
-    mutable int32_t n_eval = 0; // number of eval calls
-
-    // host buffer for the model output (logits and embeddings)
-    ggml_backend_buffer_ptr buf_output;
+    // init scheduler and compute buffers, reserve worst-case graphs
+    llama_context(
+            const llama_model & model,
+            llama_context_params params);
+
+    ~llama_context();
+
+    void synchronize();
+
+    const llama_model & get_model() const;
+
+    uint32_t n_ctx() const;
+    uint32_t n_ctx_per_seq() const;
+    uint32_t n_batch() const;
+    uint32_t n_ubatch() const;
+    uint32_t n_seq_max() const;
+
+    uint32_t n_threads() const;
+    uint32_t n_threads_batch() const;
+
+          llama_kv_cache * get_kv_self();
+    const llama_kv_cache * get_kv_self() const;
+
+    void kv_self_update();
+
+    enum llama_pooling_type pooling_type() const;
+
+    float * get_logits();
+    float * get_logits_ith(int32_t i);
+
+    float * get_embeddings();
+    float * get_embeddings_ith(int32_t i);
+    float * get_embeddings_seq(llama_seq_id seq_id);
+
+    void attach_threadpool(
+            ggml_threadpool_t threadpool,
+            ggml_threadpool_t threadpool_batch);
+
+    void detach_threadpool();
+
+    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);
+
+    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);
+
+    void set_embeddings (bool value);
+    void set_causal_attn(bool value);
+    void set_warmup(bool value);
+
+    void set_adapter_lora(
+            llama_adapter_lora * adapter,
+            float scale);
+
+    bool rm_adapter_lora(
+            llama_adapter_lora * adapter);
+
+    void clear_adapter_lora();
+
+    bool apply_adapter_cvec(
+            const float * data,
+            size_t len,
+            int32_t n_embd,
+            int32_t il_start,
+            int32_t il_end);
+
+    int encode(llama_batch & inp_batch);
+    int decode(llama_batch & inp_batch);
+
+    //
+    // state save/load
+    //
+
+    size_t state_get_size();
+    size_t state_get_data(      uint8_t * dst, size_t size);
+    size_t state_set_data(const uint8_t * src, size_t size);
+
+    size_t state_seq_get_size(llama_seq_id seq_id);
+    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size);
+    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size);
+
+    bool state_load_file(
+            const char * filepath,
+            llama_token * tokens_out,
+            size_t n_token_capacity,
+            size_t * n_token_count_out);
+
+    bool state_save_file(
+            const char * filepath,
+            const llama_token * tokens,
+            size_t n_token_count);
+
+    size_t state_seq_load_file(
+            llama_seq_id seq_id,
+            const char * filepath,
+            llama_token * tokens_out,
+            size_t n_token_capacity,
+            size_t * n_token_count_out);
+
+    size_t state_seq_save_file(
+            llama_seq_id seq_id,
+            const char * filepath,
+            const llama_token * tokens,
+            size_t n_token_count);
+
+    //
+    // perf
+    //
+
+    llama_perf_context_data perf_get_data() const;
+    void perf_reset();
+
+private:
+    //
+    // output
+    //
+
+    // Make sure enough space is available for outputs.
+    // Returns max number of outputs for which space was reserved.
+    int32_t output_reserve(int32_t n_outputs);
+
+    // make the outputs have the same order they had in the user-provided batch
+    // TODO: maybe remove this
+    void output_reorder();
+
+    //
+    // graph
+    //
+
+    int32_t graph_max_nodes() const;
+
+    // zero-out inputs and create the ctx_compute for the compute graph
+    ggml_cgraph * graph_init();
+
+    llm_graph_result_ptr graph_build(
+            ggml_context * ctx,
+            ggml_cgraph * gf,
+            const llama_ubatch & ubatch,
+            llm_graph_type gtype);
+
+    // returns the result of ggml_backend_sched_graph_compute_async execution
+    ggml_status graph_compute(
+            ggml_cgraph * gf,
+            bool batched);
+
+    llm_graph_cb graph_get_cb() const;
+
+    // used by kv_self_update()
+    ggml_tensor * build_rope_shift(
+            ggml_context * ctx0,
+            ggml_tensor * cur,
+            ggml_tensor * shift,
+            ggml_tensor * factors,
+            float freq_base,
+            float freq_scale,
+            ggml_backend_buffer * bbuf) const;
+
+    llm_graph_result_ptr build_kv_self_shift(
+            ggml_context * ctx0,
+            ggml_cgraph * gf) const;
+
+    llm_graph_result_ptr build_kv_self_defrag(
+            ggml_context * ctx0,
+            ggml_cgraph * gf) const;
+
+    // TODO: read/write lora adapters and cvec
+    size_t state_write_data(llama_io_write_i & io);
+    size_t state_read_data (llama_io_read_i  & io);
+
+    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id);
+    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id);
+
+    //
+    // members
+    //
+
+    const llama_model & model;
+
+    llama_cparams       cparams;
+    llama_adapter_cvec  cvec;
+    llama_adapter_loras loras;
+    llama_sbatch        sbatch;
+
+    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably
+
+    std::unique_ptr<llama_kv_cache_unified> kv_self;
+
+    // TODO: remove
+    bool logits_all = false;
 
     // decode output (2-dimensional array: [n_outputs][n_vocab])
     size_t  logits_size = 0; // capacity (of floats) for logits
     float * logits      = nullptr;
 
-    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
-    size_t  output_size = 0; // capacity (of tokens positions) for the output buffers
-    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch or last logical batch
-
-    bool logits_all = false;
-
     // embeddings output (2-dimensional array: [n_outputs][n_embd])
     // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
     size_t embd_size = 0; // capacity (of floats) for embeddings
@@ -72,57 +219,47 @@ struct llama_context {
     // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
     std::map<llama_seq_id, std::vector<float>> embd_seq;
 
-    // whether we are computing encoder output or decoder output
-    bool is_encoding = false;
-
-    // TODO: find a better way to accommodate mutli-dimension position encoding methods
-    // number of position id each token get, 1 for each token in most cases.
-    // when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate.
-    int n_pos_per_token = 1;
-
-    // output of the encoder part of the encoder-decoder models
-    std::vector<float> embd_enc;
-    std::vector<std::set<llama_seq_id>> seq_ids_enc;
-
-    // memory buffers used to evaluate the model
-    std::vector<uint8_t> buf_compute_meta;
+    int32_t n_outputs     = 0; // number of actually-used outputs in the current ubatch or last logical batch
+    int32_t n_outputs_max = 0; // capacity (of tokens positions) for the output buffers
+
+    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
+
     ggml_backend_sched_ptr sched;
 
+    ggml_backend_t backend_cpu = nullptr;
+    std::vector<ggml_backend_ptr> backends;
+
+    ggml_context_ptr ctx_compute;
+
+    ggml_threadpool_t threadpool       = nullptr;
+    ggml_threadpool_t threadpool_batch = nullptr;
+
     ggml_abort_callback abort_callback      = nullptr;
     void *              abort_callback_data = nullptr;
 
-    // input tensors
-    struct ggml_tensor * inp_tokens;        // I32 [n_batch]
-    struct ggml_tensor * inp_embd;          // F32 [n_embd, n_batch]
-    struct ggml_tensor * inp_pos;           // I32 [n_batch]
-    struct ggml_tensor * inp_out_ids;       // I32 [n_outputs]
-    struct ggml_tensor * inp_KQ_mask;       // F32 [kv_size, n_batch]
-    struct ggml_tensor * inp_KQ_mask_swa;   // F32 [kv_size, n_batch]
-    struct ggml_tensor * inp_K_shift;       // I32 [kv_size]
-    struct ggml_tensor * inp_mean;          // F32 [n_batch, n_batch]
-    struct ggml_tensor * inp_cls;           // I32 [n_batch]
-    struct ggml_tensor * inp_s_copy;        // I32 [kv_size]
-    struct ggml_tensor * inp_s_mask;        // F32 [1, n_kv]
-    struct ggml_tensor * inp_s_seq;         // I32 [n_kv, n_batch]
-    struct ggml_tensor * inp_pos_bucket;    // I32 [n_batch|n_kv, n_batch]
-    struct ggml_tensor * inp_embd_enc;      // F32 [n_embd, n_outputs_enc]
-    struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
+    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;
+
+    // buffer types used for the compute buffer of each backend
+    std::vector<ggml_backend_t>             backend_ptrs;
+    std::vector<ggml_backend_buffer_type_t> backend_buft;
+
+    // memory buffers used to evaluate the model
+    std::vector<uint8_t> buf_compute_meta;
+
+    // host buffer for the model output (logits and embeddings)
+    ggml_backend_buffer_ptr buf_output;
+
+    bool has_evaluated_once = false;
+
+    // perf
+    mutable int64_t t_start_us  = 0;
+    mutable int64_t t_load_us   = 0;
+    mutable int64_t t_p_eval_us = 0;
+    mutable int64_t t_eval_us   = 0;
+
+    mutable int64_t t_compute_start_us = 0;
+    mutable int64_t n_queued_tokens    = 0;
+
+    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
+    mutable int32_t n_eval   = 0; // number of eval calls
 };
 
-// TODO: make these methods of llama_context
-void llama_set_k_shift(struct llama_context & lctx);
-
-void llama_set_s_copy(struct llama_context & lctx);
-
-void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch);
-
-// Make sure enough space is available for outputs.
-// Returns max number of outputs for which space was reserved.
-size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs);
-
-// make the outputs have the same order they had in the user-provided batch
-void llama_output_reorder(struct llama_context & ctx);
-
-// For internal test use
-// TODO: remove
-const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);
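The state_*_file members declared above are normally reached through the public llama_state_save_file/llama_state_load_file wrappers. A minimal hedged sketch (not part of this diff; error handling reduced to booleans):

// sketch, assuming the public llama.h state API
#include "llama.h"
#include <vector>

static bool save_session(llama_context * ctx, const char * path,
                         const std::vector<llama_token> & tokens) {
    return llama_state_save_file(ctx, path, tokens.data(), tokens.size());
}

static bool load_session(llama_context * ctx, const char * path,
                         std::vector<llama_token> & tokens) {
    size_t n_loaded = 0;
    tokens.resize(llama_n_ctx(ctx));
    if (!llama_state_load_file(ctx, path, tokens.data(), tokens.size(), &n_loaded)) {
        return false;
    }
    tokens.resize(n_loaded);
    return true;
}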
@@ -29,6 +29,7 @@ struct llama_cparams {
     bool offload_kqv;
     bool flash_attn;
     bool no_perf;
+    bool warmup;
 
     enum llama_pooling_type pooling_type;
 
@@ -508,7 +508,7 @@ const char * llama_grammar_parser::parse_sequence(
         }
     }
     return pos;
 }
 
 const char * llama_grammar_parser::parse_rule(const char * src) {
     const char * name_end = parse_name(src);
@@ -532,7 +532,7 @@ const char * llama_grammar_parser::parse_rule(const char * src) {
         throw std::runtime_error(std::string("expecting newline or end at ") + pos);
     }
     return parse_space(pos, true);
 }
 
 bool llama_grammar_parser::parse(const char * src) {
     try {
@@ -969,7 +969,7 @@ struct llama_grammar * llama_grammar_init_impl(
         /* .awaiting_trigger = */ false,
         /* .trigger_buffer   = */ "",
         /* .trigger_tokens   = */ {},
-        /* .trigger_words    = */ {},
+        /* .trigger_patterns = */ {},
     };
 }
 
@@ -978,19 +978,15 @@ struct llama_grammar * llama_grammar_init_impl(
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
-        const char ** trigger_words,
-        size_t num_trigger_words,
+        const char ** trigger_patterns,
+        size_t num_trigger_patterns,
         const llama_token * trigger_tokens,
         size_t num_trigger_tokens) {
     llama_grammar_parser parser;
 
     // if there is a grammar, parse it
-    if (!parser.parse(grammar_str)) {
-        return nullptr;
-    }
-
-    // will be empty (default) if there are parse errors
-    if (parser.rules.empty()) {
+    // rules will be empty (default) if there are parse errors
+    if (!parser.parse(grammar_str) || parser.rules.empty()) {
         fprintf(stderr, "%s: failed to parse grammar\n", __func__);
         return nullptr;
     }
@@ -1054,14 +1050,16 @@ struct llama_grammar * llama_grammar_init_impl(
     } while (true);
 
     std::vector<llama_token> vec_trigger_tokens;
-    std::vector<std::string> vec_trigger_words;
+    std::vector<llama_grammar_trigger_pattern> vec_trigger_patterns;
     for (size_t i = 0; i < num_trigger_tokens; i++) {
         GGML_ASSERT(trigger_tokens != nullptr);
         vec_trigger_tokens.push_back(trigger_tokens[i]);
     }
-    for (size_t i = 0; i < num_trigger_words; i++) {
-        GGML_ASSERT(trigger_words != nullptr);
-        vec_trigger_words.push_back(trigger_words[i]);
+    for (size_t i = 0; i < num_trigger_patterns; i++) {
+        GGML_ASSERT(trigger_patterns != nullptr);
+        auto & trigger = vec_trigger_patterns.emplace_back();
+        trigger.pattern = trigger_patterns[i];
+        trigger.regex = std::regex(trigger.pattern);
     }
 
     // Important: vec_rules has to be moved here, not copied, because stacks contains
@@ -1076,7 +1074,7 @@ struct llama_grammar * llama_grammar_init_impl(
         /* .awaiting_trigger = */ lazy,
         /* .trigger_buffer   = */ "",
         std::move(vec_trigger_tokens),
-        std::move(vec_trigger_words),
+        std::move(vec_trigger_patterns),
     };
 }
 
@@ -1089,7 +1087,7 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) {
 }
 
 struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) {
-    llama_grammar * result = new llama_grammar {
+    auto * result = new llama_grammar {
         grammar.vocab,
         grammar.rules,
         grammar.stacks,
@@ -1098,7 +1096,7 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra
         grammar.awaiting_trigger,
         grammar.trigger_buffer,
         grammar.trigger_tokens,
-        grammar.trigger_words,
+        grammar.trigger_patterns,
     };
 
     // redirect elements in stacks to point to new rules
@@ -1173,20 +1171,22 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
             LLAMA_LOG_DEBUG("Grammar triggered on token %u (`%s`)", token, piece.c_str());
             return;
         } else {
-            // TODO: consider a smarter incremental substring search algorithm (store last position to search from).
             grammar.trigger_buffer += piece;
-            for (const auto & word : grammar.trigger_words) {
-                auto pos = grammar.trigger_buffer.find(word);
-                if (pos != std::string::npos) {
+
+            std::smatch match;
+            for (const auto & trigger_pattern : grammar.trigger_patterns) {
+                if (std::regex_match(grammar.trigger_buffer, match, trigger_pattern.regex)) {
                     grammar.awaiting_trigger = false;
-                    auto constrained_str = grammar.trigger_buffer.substr(pos);
+                    // get from the first match to the end of the string
+                    auto constrained_str = grammar.trigger_buffer.substr(match.position(1));
+                    // std::string constrained_str(match[1].first, grammar.trigger_buffer.end());
                     grammar.trigger_buffer.clear();
                     llama_grammar_accept_str(grammar, constrained_str);
-                    LLAMA_LOG_DEBUG("Grammar triggered on word `%s`", word.c_str());
+                    LLAMA_LOG_DEBUG("Grammar triggered on regex: '%s'\n", constrained_str.c_str());
                     return;
                 }
             }
-            LLAMA_LOG_DEBUG("Grammar still awaiting trigger after token %d (`%s`) (buffer: `%s`)\n", token, piece.c_str(), grammar.trigger_buffer.c_str());
+            LLAMA_LOG_DEBUG("Grammar still awaiting trigger after token %d (`%s`)\n", token, piece.c_str());
             return;
         }
     }
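The trigger-pattern logic above can be illustrated in isolation: the regex must match the whole buffered output, and the grammar is then fed the text from the first capture group onwards. The pattern below is hypothetical, not one shipped with llama.cpp.

// standalone sketch of the matching semantics
#include <cstdio>
#include <regex>
#include <string>

int main() {
    const std::regex trigger(R"([\s\S]*?(<tool_call>[\s\S]*))"); // hypothetical trigger pattern
    std::string buffer;

    for (const std::string piece : {"Sure, ", "let me call it: ", "<tool_call>", "{...}"}) {
        buffer += piece;
        std::smatch match;
        if (std::regex_match(buffer, match, trigger)) {
            // equivalent of trigger_buffer.substr(match.position(1)) above
            const std::string constrained = buffer.substr(match.position(1));
            std::printf("trigger fired, constrained text: '%s'\n", constrained.c_str());
            break;
        }
    }
}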
@@ -3,6 +3,7 @@
 #include "llama.h"
 
 #include <map>
+#include <regex>
 #include <string>
 #include <vector>
 
@@ -105,6 +106,11 @@ struct llama_grammar_parser {
     void print(FILE * file);
 };
 
+struct llama_grammar_trigger_pattern {
+    std::string pattern;
+    std::regex  regex;
+};
+
 struct llama_grammar {
     // note: allow null vocab for testing (not great)
     const llama_vocab * vocab;
@@ -116,13 +122,16 @@ struct llama_grammar {
     llama_partial_utf8 partial_utf8;
 
     // lazy grammars wait for trigger words or tokens before constraining the sampling.
-    // we still ahve trigger_tokens for non-lazy grammars to force printing of special trigger tokens.
+    // we still have trigger_tokens for non-lazy grammars to force printing of special trigger tokens.
     // (useful e.g. for tool_choice=required)
     bool lazy = false;
     bool awaiting_trigger = false;       // Initialized to true for lazy grammars only
     std::string trigger_buffer;          // Output buffered by lazy grammar. Will be cleared once trigger is found.
     std::vector<llama_token> trigger_tokens;   // Tokens that trigger a lazy grammar, or tokens to force printing of (even if special).
-    std::vector<std::string> trigger_words;
+    std::vector<llama_grammar_trigger_pattern>
+        trigger_patterns;                // Regular expressions that trigger a lazy grammar. Must be a full match of the entire generated
+                                         // string, and the grammar will be given the string from the first match group onwards.
+
 };
 
 //
@@ -141,8 +150,8 @@ struct llama_grammar * llama_grammar_init_impl(
         const char * grammar_str,
         const char * grammar_root,
         bool lazy,
-        const char ** trigger_words,
-        size_t num_trigger_words,
+        const char ** trigger_patterns,
+        size_t num_trigger_patterns,
         const llama_token * trigger_tokens,
         size_t num_trigger_tokens);
 
examples/talk-llama/llama-graph.cpp (new file, 1706 lines): file diff suppressed because it is too large
examples/talk-llama/llama-graph.h (new file, 596 lines)
@@ -0,0 +1,596 @@
#pragma once

#include "llama-arch.h"
#include "llama-hparams.h"
#include "llama-adapter.h"

#include <cstdint>
#include <vector>
#include <memory>
#include <set>
#include <functional>

struct ggml_cgraph;
struct ggml_context;
struct ggml_tensor;

struct llama_ubatch;
struct llama_cparams;

class llama_memory_i;
class llama_kv_cache_unified;

// certain models (typically multi-modal) can produce different types of graphs
enum llm_graph_type {
    LLM_GRAPH_TYPE_DEFAULT,
    LLM_GRAPH_TYPE_ENCODER,
    LLM_GRAPH_TYPE_DECODER,
};

enum llm_ffn_op_type {
    LLM_FFN_SILU,
    LLM_FFN_GELU,
    LLM_FFN_RELU,
    LLM_FFN_RELU_SQR,
    LLM_FFN_SWIGLU,
};

enum llm_ffn_gate_type {
    LLM_FFN_SEQ,
    LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
};

enum llm_norm_type {
    LLM_NORM,
    LLM_NORM_RMS,
    LLM_NORM_GROUP,
};

// TODO: tmp - need something better to pass the data from the encoder to the decoder
struct llama_cross {
    // the output embeddings from the encoder as a ggml tensor
    // TODO: this needs more work to be correct, for now copy the embeddings data to host memory
    // ref: https://github.com/ggml-org/llama.cpp/pull/11213#discussion_r1969892524
    //ggml_tensor * t_embd = nullptr;

    int64_t n_embd = 0;
    int64_t n_enc  = 0;

    // embeddings data copied to host memory (tmp)
    std::vector<float> v_embd;

    // needed to construct the cross-attention mask in the decoder
    std::vector<std::set<llama_seq_id>> seq_ids_enc;
};

//
// llm_graph_input
//

class llm_graph_input_i {
public:
    virtual ~llm_graph_input_i() = default;

    virtual void set_input(const llama_ubatch * ubatch) = 0;
};

using llm_graph_input_ptr = std::unique_ptr<llm_graph_input_i>;


class llm_graph_input_embd : public llm_graph_input_i {
public:
    llm_graph_input_embd() = default;
    virtual ~llm_graph_input_embd() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * tokens = nullptr; // I32 [n_batch]
    ggml_tensor * embd   = nullptr; // F32 [n_embd, n_batch]
};

class llm_graph_input_pos : public llm_graph_input_i {
public:
    llm_graph_input_pos(int64_t n_pos_per_token) : n_pos_per_token(n_pos_per_token) {}
    virtual ~llm_graph_input_pos() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * pos = nullptr; // I32 [n_batch]

    const int64_t n_pos_per_token = 1;
};

// temperature tuning, used by llama4
class llm_graph_input_attn_temp : public llm_graph_input_i {
public:
    llm_graph_input_attn_temp(int64_t n_pos_per_token, uint32_t n_attn_temp_floor_scale, float f_attn_temp_scale)
        : n_pos_per_token(n_pos_per_token), n_attn_temp_floor_scale(n_attn_temp_floor_scale), f_attn_temp_scale(f_attn_temp_scale) {}
    virtual ~llm_graph_input_attn_temp() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * attn_scale = nullptr; // F32 [n_batch]

    const int64_t n_pos_per_token = 1;

    const uint32_t n_attn_temp_floor_scale;
    const float    f_attn_temp_scale;
};

class llm_graph_input_pos_bucket : public llm_graph_input_i {
public:
    llm_graph_input_pos_bucket(const llama_hparams & hparams) : hparams(hparams) {}
    virtual ~llm_graph_input_pos_bucket() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * pos_bucket = nullptr; // I32 [n_batch, n_batch]

    const llama_hparams & hparams;
};

class llm_graph_input_pos_bucket_kv : public llm_graph_input_i {
public:
    llm_graph_input_pos_bucket_kv(
            const llama_hparams & hparams,
            const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {}
    virtual ~llm_graph_input_pos_bucket_kv() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * pos_bucket = nullptr; // I32 [n_kv, n_batch]

    const llama_hparams & hparams;
    const llama_kv_cache_unified * kv_self;
};

class llm_graph_input_out_ids : public llm_graph_input_i {
public:
    llm_graph_input_out_ids(
            const llama_hparams & hparams,
            const llama_cparams & cparams,
            int32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {}
    virtual ~llm_graph_input_out_ids() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * out_ids; // I32 [n_outputs]

    const llama_hparams & hparams;
    const llama_cparams & cparams;

    const int32_t n_outputs;
};

class llm_graph_input_mean : public llm_graph_input_i {
public:
    llm_graph_input_mean(const llama_cparams & cparams) : cparams(cparams) {}
    virtual ~llm_graph_input_mean() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * mean; // F32 [n_batch, n_batch]

    const llama_cparams & cparams;
};

class llm_graph_input_cls : public llm_graph_input_i {
public:
    llm_graph_input_cls(const llama_cparams & cparams) : cparams(cparams) {}
    virtual ~llm_graph_input_cls() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * cls; // I32 [n_batch]

    const llama_cparams & cparams;
};

class llm_graph_input_s_copy : public llm_graph_input_i {
public:
    llm_graph_input_s_copy(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
    virtual ~llm_graph_input_s_copy() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * s_copy; // I32 [kv_size]

    const llama_kv_cache_unified * kv_self;
};

class llm_graph_input_s_mask : public llm_graph_input_i {
public:
    llm_graph_input_s_mask(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
    virtual ~llm_graph_input_s_mask() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * s_mask; // F32 [1, n_kv]

    const llama_kv_cache_unified * kv_self;
};

class llm_graph_input_cross_embd : public llm_graph_input_i {
public:
    llm_graph_input_cross_embd(
            const llama_cross * cross) : cross(cross) {}
    virtual ~llm_graph_input_cross_embd() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * cross_embd; // F32 [n_embd, n_outputs_enc]

    const llama_cross * cross;
};

class llm_graph_input_attn_no_cache : public llm_graph_input_i {
public:
    llm_graph_input_attn_no_cache(const llama_hparams & hparams, const llama_cparams & cparams) :
        hparams(hparams),
        cparams(cparams) {
    }
    ~llm_graph_input_attn_no_cache() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * get_kq_mask() const { return kq_mask_cnv; }

    ggml_tensor * kq_mask     = nullptr; // F32 [n_tokens, n_batch]
    ggml_tensor * kq_mask_cnv = nullptr; //     [n_tokens, n_batch]

    const llama_hparams & hparams;
    const llama_cparams & cparams;
};

class llm_graph_input_attn_kv_unified : public llm_graph_input_i {
public:
    llm_graph_input_attn_kv_unified(
            const llama_hparams & hparams,
            const llama_cparams & cparams,
            const llama_kv_cache_unified * kv_self) :
        hparams(hparams),
        cparams(cparams),
        kv_self(kv_self) {
    }
    ~llm_graph_input_attn_kv_unified() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * get_kq_mask()     const { return self_kq_mask_cnv; }
    ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; }

    ggml_tensor * self_kq_mask         = nullptr; // F32 [n_kv, n_batch]
    ggml_tensor * self_kq_mask_cnv     = nullptr; //     [n_kv, n_batch]
    ggml_tensor * self_kq_mask_swa     = nullptr; // F32 [n_kv, n_batch]
    ggml_tensor * self_kq_mask_swa_cnv = nullptr; //     [n_kv, n_batch]

    const llama_hparams & hparams;
    const llama_cparams & cparams;

    const llama_kv_cache_unified * kv_self;
};

class llm_graph_input_attn_cross : public llm_graph_input_i {
public:
    llm_graph_input_attn_cross(const llama_cross * cross) : cross(cross) {}
    ~llm_graph_input_attn_cross() = default;

    void set_input(const llama_ubatch * ubatch) override;

    ggml_tensor * get_kq_mask_cross() const { return cross_kq_mask_cnv; }

    ggml_tensor * cross_kq_mask     = nullptr; // F32 [n_outputs_enc, n_batch]
    ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch]

    const llama_cross * cross = nullptr;
};

//
// llm_graph_result
//

// these objects deliver the result from the graph build process back to the llama_context
// note that the input tensors created for the graph are referenced here - the goal is to be able to populate their
// specific data, by calling the set_inputs() method
// along with the input tensors, the object also provides commonly used outputs tensors, such as logits, embeddings, etc.
// these are used by the llama_context to extact the relevant data, based on the compute parameters

class llm_graph_result_i {
public:
    virtual ~llm_graph_result_i() = default;

    virtual ggml_tensor * get_logits()      = 0;
    virtual ggml_tensor * get_embd()        = 0;
    virtual ggml_tensor * get_embd_pooled() = 0;

    virtual void set_inputs(const llama_ubatch * ubatch) = 0;
};

using llm_graph_result_ptr = std::unique_ptr<llm_graph_result_i>;


class llm_graph_result : public llm_graph_result_i {
public:
    virtual ~llm_graph_result() = default;

    ggml_tensor * get_logits()      override { return t_logits; }
    ggml_tensor * get_embd()        override { return t_embd; }
    ggml_tensor * get_embd_pooled() override { return t_embd_pooled; }

    void set_inputs(const llama_ubatch * ubatch) override {
        for (auto & input : inputs) {
            input->set_input(ubatch);
        }
    }

    llm_graph_input_i * add_input(llm_graph_input_ptr input) {
        inputs.emplace_back(std::move(input));
        return inputs.back().get();
    }

    // important graph nodes
    ggml_tensor * t_logits      = nullptr;
    ggml_tensor * t_embd        = nullptr;
    ggml_tensor * t_embd_pooled = nullptr;

    std::vector<llm_graph_input_ptr> inputs;
};

//
// llm_graph_context
//

// callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
using llm_graph_cb = std::function<void(const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il)>;

struct llm_graph_params {
    ggml_context * ctx;

    const llm_arch arch;

    const llama_hparams & hparams;
    const llama_cparams & cparams;
    const llama_ubatch  & ubatch;

    ggml_backend_sched * sched;
    ggml_backend * backend_cpu;

    const llama_adapter_cvec  * cvec;
    const llama_adapter_loras * loras;
    const llama_memory_i      * memory;
    const llama_cross         * cross;

    int32_t n_outputs;

    const llm_graph_cb & cb;
};

struct llm_graph_context {
    const llm_arch arch;

    const llama_hparams & hparams;
    const llama_cparams & cparams;
    const llama_ubatch  & ubatch;

    const int64_t n_embd;
    const int64_t n_layer;
    const int64_t n_rot;
    const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
    const int64_t n_ctx_per_seq;
    const int64_t n_head;
    const int64_t n_head_kv;
    const int64_t n_embd_head_k;
    const int64_t n_embd_k_gqa;
    const int64_t n_embd_head_v;
    const int64_t n_embd_v_gqa;
    const int64_t n_expert;
    const int64_t n_expert_used;

    const float freq_base;
    const float freq_scale;
    const float ext_factor;
    const float attn_factor;
    const float beta_fast;
    const float beta_slow;
    const float norm_eps;
    const float norm_rms_eps;

    const int32_t n_tokens;
    const int32_t n_outputs;
    const int32_t n_ctx_orig; // yarn

    const enum llama_pooling_type pooling_type;
    const enum llama_rope_type    rope_type;

    ggml_context * ctx0 = nullptr;

    ggml_backend_sched * sched;

    ggml_backend * backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove?

    const llama_adapter_cvec  * cvec;
    const llama_adapter_loras * loras;
    const llama_memory_i      * memory;
    const llama_cross         * cross;

    const llm_graph_cb & cb_func;

    std::unique_ptr<llm_graph_result> res;

    llm_graph_context(const llm_graph_params & params);

    int64_t n_pos_per_token() const;

    void cb(ggml_tensor * cur, const char * name, int il) const;

    //
    // common
    //

    ggml_tensor * build_cvec(
            ggml_tensor * cur,
            int il) const;

    // do mat_mul, while optionally apply lora
    ggml_tensor * build_lora_mm(
            ggml_tensor * w,
            ggml_tensor * cur) const;

    // do mat_mul_id, while optionally apply lora
    ggml_tensor * build_lora_mm_id(
            ggml_tensor * w,   // ggml_tensor * as
            ggml_tensor * cur, // ggml_tensor * b
            ggml_tensor * ids) const;

    ggml_tensor * build_norm(
            ggml_tensor * cur,
            ggml_tensor * mw,
            ggml_tensor * mb,
            llm_norm_type type,
            int il) const;

    ggml_tensor * build_ffn(
            ggml_tensor * cur,
            ggml_tensor * up,
            ggml_tensor * up_b,
            ggml_tensor * up_s,
            ggml_tensor * gate,
            ggml_tensor * gate_b,
            ggml_tensor * gate_s,
            ggml_tensor * down,
            ggml_tensor * down_b,
            ggml_tensor * down_s,
            ggml_tensor * act_scales,
            llm_ffn_op_type type_op,
            llm_ffn_gate_type type_gate,
            int il) const;

    ggml_tensor * build_moe_ffn(
            ggml_tensor * cur,
            ggml_tensor * gate_inp,
            ggml_tensor * up_exps,
            ggml_tensor * gate_exps,
            ggml_tensor * down_exps,
            ggml_tensor * exp_probs_b,
            int64_t n_expert,
            int64_t n_expert_used,
            llm_ffn_op_type type_op,
            bool norm_w,
            bool scale_w,
            float w_scale,
            llama_expert_gating_func_type gating_op,
            int il) const;

    //
    // inputs
    //

    ggml_tensor * build_inp_embd(ggml_tensor * tok_embd) const;
    ggml_tensor * build_inp_pos() const;
    ggml_tensor * build_inp_attn_scale() const;
    ggml_tensor * build_inp_out_ids() const;
    ggml_tensor * build_inp_mean() const;
    ggml_tensor * build_inp_cls() const;
    ggml_tensor * build_inp_s_copy() const;
    ggml_tensor * build_inp_s_mask() const;

    ggml_tensor * build_inp_cross_embd() const;
    ggml_tensor * build_inp_pos_bucket_enc() const;
    ggml_tensor * build_inp_pos_bucket_dec() const;
    ggml_tensor * build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const;

    //
    // attention
    //

    ggml_tensor * build_attn_mha(
            ggml_cgraph * gf,
            ggml_tensor * q,       // [n_embd_head_q, n_tokens, n_head_q]
            ggml_tensor * k,       // [n_embd_head_k, n_tokens, n_head_k]
            ggml_tensor * v,       // [n_embd_head_v, n_tokens, n_head_v] (v_trans == false)
            ggml_tensor * kq_b,
            ggml_tensor * kq_mask,
            ggml_tensor * v_mla,   // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
            bool v_trans,
            float kq_scale) const;

    llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const;

    ggml_tensor * build_attn(
            llm_graph_input_attn_no_cache * inp,
            ggml_cgraph * gf,
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
            ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
            ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
            ggml_tensor * kq_b,
            ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
            float kq_scale,
            int il) const;

    llm_graph_input_attn_kv_unified * build_attn_inp_kv_unified() const;

    ggml_tensor * build_attn(
            llm_graph_input_attn_kv_unified * inp,
            ggml_cgraph * gf,
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
            ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
            ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
            ggml_tensor * kq_b,
            ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
            float kq_scale,
            int il) const;

    llm_graph_input_attn_cross * build_attn_inp_cross() const;

    ggml_tensor * build_attn(
            llm_graph_input_attn_cross * inp,
            ggml_cgraph * gf,
            ggml_tensor * wo,
            ggml_tensor * wo_b,
            ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
            ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
            ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
            ggml_tensor * kq_b,
            ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
            float kq_scale,
            int il) const;

    //
    // recurrent
    //

    ggml_tensor * build_copy_mask_state(
            ggml_cgraph * gf,
            ggml_tensor * s,
            ggml_tensor * state_copy,
            ggml_tensor * state_mask,
            int32_t n_state,
            int32_t n_seqs) const;

    ggml_tensor * build_rwkv_token_shift_load(
            ggml_cgraph * gf,
            ggml_tensor * state_copy,
            ggml_tensor * state_mask,
            const llama_ubatch & ubatch,
            int il) const;

    ggml_tensor * build_rwkv_token_shift_store(
            ggml_tensor * token_shift,
            const llama_ubatch & ubatch,
            int il) const;

    //
    // pooling
    //

    void build_pooling(
            ggml_cgraph * gf,
            ggml_tensor * cls,
            ggml_tensor * cls_b,
            ggml_tensor * cls_out,
            ggml_tensor * cls_out_b) const;
};
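The add_input()/set_inputs() flow exposed by llm_graph_result above works as follows; the trivial input class here is hypothetical, not one of the classes declared in the header.

// sketch, building on the interfaces declared above
class llm_graph_input_demo : public llm_graph_input_i {
public:
    void set_input(const llama_ubatch * ubatch) override {
        // a real input would copy data from the ubatch into its ggml tensor(s) here
        (void) ubatch;
    }
};

// during graph build (inside llm_graph_context):
//   auto inp    = std::make_unique<llm_graph_input_demo>();
//   auto * iptr = inp.get();              // keep a raw pointer to wire up tensors
//   res->add_input(std::move(inp));       // the result object now owns the input
//
// later, before computing the graph, llama_context calls:
//   res->set_inputs(&ubatch);             // populates every registered input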
@@ -69,3 +69,11 @@ uint32_t llama_hparams::n_embd_v_s() const {
     // corresponds to Mamba's ssm_states size
     return ssm_d_state * ssm_d_inner;
 }
+
+bool llama_hparams::is_swa(uint32_t il) const {
+    if (il < n_layer) {
+        return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1);
+    }
+
+    GGML_ABORT("fatal error");
+}
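A quick illustration of what the is_swa() rule evaluates to, using hypothetical values (n_swa = 8192, n_swa_pattern = 4, not taken from this diff): layers 0-2 of each group of four use the sliding window, every fourth layer uses full attention.

// standalone sketch of the layer pattern
#include <cstdint>
#include <cstdio>

static bool is_swa(uint32_t il, uint32_t n_swa, uint32_t n_swa_pattern) {
    return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1);
}

int main() {
    for (uint32_t il = 0; il < 8; ++il) {
        std::printf("layer %u: %s\n", il, is_swa(il, 8192, 4) ? "SWA" : "full attention");
    }
}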
@@ -36,12 +36,17 @@ struct llama_hparams {
     uint32_t n_layer;
     uint32_t n_rot;
     uint32_t n_swa = 0; // sliding window attention (SWA)
+    uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention
     uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
     uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
     uint32_t n_expert = 0;
     uint32_t n_expert_used = 0;
     uint32_t n_rel_attn_bkts = 0;
 
+    // note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
+    uint32_t n_embd_head_k_mla = 0;
+    uint32_t n_embd_head_v_mla = 0;
+
     // for WavTokenizer
     struct llama_hparams_posnet   posnet;
     struct llama_hparams_convnext convnext;
@@ -75,10 +80,16 @@ struct llama_hparams {
     uint32_t time_decay_extra_dim = 0;
     uint32_t wkv_head_size = 0;
     uint32_t token_shift_count = 2;
+    uint32_t n_lora_decay = 0;
+    uint32_t n_lora_iclr = 0;
+    uint32_t n_lora_value_res_mix = 0;
+    uint32_t n_lora_gate = 0;
 
     float rope_attn_factor = 1.0f;
     float rope_freq_base_train;
+    float rope_freq_base_train_swa;
     float rope_freq_scale_train;
+    float rope_freq_scale_train_swa;
     uint32_t n_ctx_orig_yarn;
     float rope_yarn_log_mul;
 
@@ -105,6 +116,14 @@ struct llama_hparams {
     bool use_alibi = false;
     bool attn_soft_cap = false;
+
+    uint32_t n_moe_layer_step = 0;
+    bool     use_kq_norm = true;
+    uint32_t n_attn_chunk = 0;
+    // values below seems to be fixed on llama4
+    uint32_t n_no_rope_layer_step = 4;
+    uint32_t n_attn_temp_floor_scale = 8192;
+    float    f_attn_temp_scale = 0.1;
 
     // needed by encoder-decoder models (e.g. T5, FLAN-T5)
     // ref: https://github.com/ggerganov/llama.cpp/pull/8141
     llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
@@ -133,6 +152,8 @@ struct llama_hparams {
 
     // dimension of the recurrent state embeddings
     uint32_t n_embd_v_s() const;
+
+    bool is_swa(uint32_t il) const;
 };
 
 static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
@@ -6,13 +6,13 @@
 #include <vector>
 
 #ifdef __GNUC__
-#ifdef __MINGW32__
-#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+#    if defined(__MINGW32__) && !defined(__clang__)
+#        define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+#    else
+#        define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+#    endif
 #else
-#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
-#endif
-#else
-#define LLAMA_ATTRIBUTE_FORMAT(...)
+#    define LLAMA_ATTRIBUTE_FORMAT(...)
 #endif
 
 //
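For context, a format-attribute macro like the one above is applied to variadic printf-style functions so GCC/Clang can check the argument types against the format string. The logging function below is hypothetical, not part of llama-impl.h.

// sketch of typical usage
#include <cstdarg>
#include <cstdio>

#define DEMO_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))

DEMO_ATTRIBUTE_FORMAT(1, 2)
static void demo_log(const char * fmt, ...) {
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}

// demo_log("%d tokens\n", 42);   // ok
// demo_log("%s tokens\n", 42);   // -Wformat flags the mismatch at compile time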
examples/talk-llama/llama-io.cpp (new file, 15 lines)
@@ -0,0 +1,15 @@
#include "llama-io.h"

void llama_io_write_i::write_string(const std::string & str) {
    uint32_t str_size = str.size();

    write(&str_size, sizeof(str_size));
    write(str.data(), str_size);
}

void llama_io_read_i::read_string(std::string & str) {
    uint32_t str_size;
    read_to(&str_size, sizeof(str_size));

    str.assign((const char *) read(str_size), str_size);
}
examples/talk-llama/llama-io.h (new file, 35 lines)
@@ -0,0 +1,35 @@
#pragma once

#include <cstddef>
#include <cstdint>
#include <string>

struct ggml_tensor;

class llama_io_write_i {
public:
    llama_io_write_i() = default;
    virtual ~llama_io_write_i() = default;

    virtual void write(const void * src, size_t size) = 0;
    virtual void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) = 0;

    // bytes written so far
    virtual size_t n_bytes() = 0;

    void write_string(const std::string & str);
};

class llama_io_read_i {
public:
    llama_io_read_i() = default;
    virtual ~llama_io_read_i() = default;

    virtual const uint8_t * read(size_t size) = 0;
    virtual void read_to(void * dst, size_t size) = 0;

    // bytes read so far
    virtual size_t n_bytes() = 0;

    void read_string(std::string & str);
};
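A minimal in-memory implementation of the write interface above shows how write_string() rides on the pure virtual write(); this class is hypothetical and not part of the diff.

// sketch, assuming llama-io.h from above is included
#include <cstdint>
#include <vector>

class llama_io_write_buffer_demo : public llama_io_write_i {
public:
    void write(const void * src, size_t size) override {
        const uint8_t * p = (const uint8_t *) src;
        buf.insert(buf.end(), p, p + size);
    }

    void write_tensor(const ggml_tensor * /*tensor*/, size_t /*offset*/, size_t /*size*/) override {
        // a real implementation would copy the tensor data here
    }

    size_t n_bytes() override { return buf.size(); }

    std::vector<uint8_t> buf;
};

// llama_io_write_buffer_demo io;
// io.write_string("hello"); // writes a 4-byte length prefix followed by the bytes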
(file diff suppressed because it is too large)
@@ -1,12 +1,48 @@
 #pragma once
 
 #include "llama.h"
+#include "llama-io.h"
+#include "llama-memory.h"
 
 #include "ggml-cpp.h"
 
+#include <functional>
 #include <set>
 #include <vector>
 
+struct llama_cparams;
+struct llama_hparams;
+struct llama_ubatch;
+
+struct llama_kv_cache : public llama_memory_i {
+    using llama_memory_i::llama_memory_i;
+
+    virtual void restore() = 0; // call if batch processing fails - restores the cache state
+    virtual void commit()  = 0; // call after successful batch processing - clears any pending state
+
+    virtual int32_t get_n_tokens()   const = 0;
+    virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache
+
+    virtual bool get_can_shift() const = 0;
+
+    bool get_can_edit() const override { return get_can_shift(); }
+};
+
+struct llama_kv_cache_guard {
+    llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {}
+
+    ~llama_kv_cache_guard() {
+        kv->restore();
+    }
+
+    void commit() {
+        kv->commit();
+    }
+
+private:
+    llama_kv_cache * kv;
+};
+
 struct llama_kv_cell {
     llama_pos pos   = -1;
     llama_pos delta =  0;
@@ -29,15 +65,112 @@ struct llama_kv_cell {
 };
 
 // ring-buffer of cached KV data
-struct llama_kv_cache {
+// TODO: pimpl
+// TODO: add notion of max sequences
+class llama_kv_cache_unified : public llama_kv_cache {
+public:
+    // can be used to query data from the model if needed
+    struct callbacks {
+        std::function<ggml_tensor * (uint32_t n_ctx_per_seq, int il)> get_rope_factors;
+    };
+
+    llama_kv_cache_unified(
+            const llama_hparams & hparams,
+            callbacks cbs);
+
+    virtual ~llama_kv_cache_unified() = default;
+
+    // TODO: become constructor
+    bool init(
+            const llama_model & model, // TODO: do not reference the model
+            const llama_cparams & cparams,
+            ggml_type type_k,
+            ggml_type type_v,
+            uint32_t kv_size,
+            bool offload);
+
+    int32_t get_n_tokens()   const override;
+    int32_t get_used_cells() const override;
+
+    size_t total_size() const;
+
+    // TODO: better data structures to reduce the cost of this operation
+    llama_pos pos_max() const;
+
+    void clear() override;
+    void defrag() override;
+
+    virtual void restore() override;
+    virtual void commit() override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    bool get_can_shift() const override;
+
+    // find an empty slot of size "n_tokens" in the cache
+    // updates the cache head
+    // Note: On success, it's important that cache.head points
+    // to the first cell of the slot.
+    bool find_slot(const llama_ubatch & batch);
+
+    // TODO: maybe not needed
+    uint32_t get_padding(const llama_cparams & cparams) const;
+
+    // find how many cells are currently in use
+    uint32_t cell_max() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    // defrag
+
+    struct {
+        std::vector<uint32_t> ids;
+    } defrag_info;
+
+    // return true if cells have been moved
+    bool defrag_prepare(int32_t n_max_nodes);
+
+    // commit/restore cache
+
+    struct slot_range {
+        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
+        uint32_t c1 = 0;
+    };
+
+    // pending cell updates that are not yet committed
+    struct {
+        std::vector<slot_range> ranges;
+    } pending;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1);
+
+    // members
+
+    const llama_hparams & hparams;
+
+    callbacks cbs;
+
     bool has_shift = false;
     bool do_defrag = false;
+
+    // TODO: remove this and implement llama_kv_cache_recurrent instead
     bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
+
     bool v_trans   = true;  // the value tensor is transposed
     bool can_shift = false;
 
     // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_internal also uses it, so it
+    // for a free KV slot. llama_decode_impl also uses it, so it
     // cannot be freely changed after a slot has been allocated.
     uint32_t head = 0;
     uint32_t size = 0;
@@ -46,173 +179,35 @@ struct llama_kv_cache {
     // computed before each graph build
     uint32_t n = 0;
 
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
-
     std::vector<llama_kv_cell> cells;
 
-    std::vector<struct ggml_tensor *> k_l; // per layer
-    std::vector<struct ggml_tensor *> v_l;
+    std::vector<ggml_tensor *> k_l; // per layer
+    std::vector<ggml_tensor *> v_l;
+
+private:
+    ggml_type type_k = GGML_TYPE_F16;
+    ggml_type type_v = GGML_TYPE_F16;
 
     std::vector<ggml_context_ptr>        ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;
 
-    size_t total_size() const {
-        size_t size = 0;
-        for (const auto & buf : bufs) {
-            size += ggml_backend_buffer_get_size(buf.get());
-        }
-
-        return size;
-    }
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos max_pos() const {
-        llama_pos max_pos = -1;
-        for (const auto & cell : cells) {
-            max_pos = std::max(max_pos, cell.pos);
-        }
-
-        return max_pos;
-    }
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };
|
|
||||||
// a structure holds information about the slot found in llama_kv_cache_find_slot
|
// TODO: temporary reusing llama_kv_cache_unified -- implement recurrent cache and simplify llama_kv_cache_unified
|
||||||
struct llama_kv_cache_slot_info {
|
//class llama_kv_cache_recurrent : public llama_kv_cache_unified {
|
||||||
std::pair<uint32_t, uint32_t> boundaries; // slot boundaries [begin, end)
|
//public:
|
||||||
bool found = false; // the slot was found
|
// using llama_kv_cache_unified::llama_kv_cache_unified;
|
||||||
|
//};
|
||||||
explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
|
|
||||||
llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
|
|
||||||
|
|
||||||
operator bool() const { return found; }
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: maybe not needed
|
|
||||||
uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams);
|
|
||||||
|
|
||||||
bool llama_kv_cache_init(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
const llama_model & model,
|
|
||||||
const llama_cparams & cparams,
|
|
||||||
ggml_type type_k,
|
|
||||||
ggml_type type_v,
|
|
||||||
uint32_t kv_size,
|
|
||||||
bool offload);
|
|
||||||
|
|
||||||
// find an empty slot of size "n_tokens" in the cache
|
|
||||||
// updates the cache head
|
|
||||||
// returns a structure holding information about the slot found
|
|
||||||
// Note: On success, it's important that cache.head points
|
|
||||||
// to the first cell of the slot.
|
|
||||||
struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
const struct llama_ubatch & batch);
|
|
||||||
|
|
||||||
// find how many cells are currently in use
|
|
||||||
uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache);
|
|
||||||
|
|
||||||
void llama_kv_cache_clear(struct llama_kv_cache & cache);
|
|
||||||
|
|
||||||
bool llama_kv_cache_seq_rm(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
llama_seq_id seq_id,
|
|
||||||
llama_pos p0,
|
|
||||||
llama_pos p1);
|
|
||||||
|
|
||||||
void llama_kv_cache_seq_cp(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
llama_seq_id seq_id_src,
|
|
||||||
llama_seq_id seq_id_dst,
|
|
||||||
llama_pos p0,
|
|
||||||
llama_pos p1);
|
|
||||||
|
|
||||||
void llama_kv_cache_seq_keep(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
llama_seq_id seq_id);
|
|
||||||
|
|
||||||
void llama_kv_cache_seq_add(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
llama_seq_id seq_id,
|
|
||||||
llama_pos p0,
|
|
||||||
llama_pos p1,
|
|
||||||
llama_pos delta);
|
|
||||||
|
|
||||||
void llama_kv_cache_seq_div(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
llama_seq_id seq_id,
|
|
||||||
llama_pos p0,
|
|
||||||
llama_pos p1,
|
|
||||||
int d);
|
|
||||||
|
|
||||||
llama_pos llama_kv_cache_seq_pos_max(
|
|
||||||
struct llama_kv_cache & cache,
|
|
||||||
llama_seq_id seq_id);
|
|
||||||
|
|
||||||
void llama_kv_cache_defrag(struct llama_kv_cache & cache);
|
|
||||||
|
|
||||||
int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv);
|
|
||||||
|
|
||||||
int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv);
|
|
||||||
|
|
||||||
bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv);
|
|
||||||
|
|
||||||
//
|
//
|
||||||
// kv cache view
|
// kv cache view
|
||||||
//
|
//
|
||||||
|
|
||||||
struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max);
|
llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
|
||||||
|
|
||||||
void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv);
|
|
||||||
|
|
||||||
//
|
|
||||||
// kv cache restore
|
|
||||||
//
|
|
||||||
|
|
||||||
// saves the kv_cache state for future recovery.
|
|
||||||
// used to rollback llama_kv_cache_find_slot changes.
|
|
||||||
struct llama_kv_slot_restorer {
|
|
||||||
struct llama_kv_cache_state {
|
|
||||||
uint32_t head = 0;
|
|
||||||
uint32_t n = 0;
|
|
||||||
} old_state;
|
|
||||||
|
|
||||||
// for non-recurrent models only
|
|
||||||
// list of slots to restore
|
|
||||||
std::vector<std::pair<uint32_t, uint32_t>> slot_boundaries;
|
|
||||||
|
|
||||||
bool do_restore = false;
|
|
||||||
|
|
||||||
explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) {
|
|
||||||
old_state.head = cache.head;
|
|
||||||
old_state.n = cache.n;
|
|
||||||
}
|
|
||||||
|
|
||||||
// saves a slot information for future restoration
|
|
||||||
void save(const struct llama_kv_cache_slot_info & slot) {
|
|
||||||
if (slot) {
|
|
||||||
do_restore = true;
|
|
||||||
if (slot.boundaries.first != slot.boundaries.second) {
|
|
||||||
slot_boundaries.push_back(slot.boundaries);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// must be explicitly called to restore the kv_cache state
|
|
||||||
// and rollback changes from all llama_kv_cache_find_slot calls
|
|
||||||
void restore(struct llama_kv_cache & cache) {
|
|
||||||
if (do_restore) {
|
|
||||||
cache.head = old_state.head;
|
|
||||||
cache.n = old_state.n;
|
|
||||||
|
|
||||||
if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
|
|
||||||
llama_kv_cache_seq_rm(cache, -1, -1, -1);
|
|
||||||
} else {
|
|
||||||
for (auto & slot : slot_boundaries) {
|
|
||||||
llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
|
void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
|
||||||
|
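For orientation, a minimal sketch of how the new commit/restore guard from the header above might be used around a decode step. This is illustrative only; `kv` (a llama_kv_cache_unified) and `ubatch` are assumed to be provided by the caller, and error handling is reduced to an early return.

    {
        // destructor calls kv->restore(), rolling back uncommitted slot changes
        llama_kv_cache_guard kv_guard(&kv);

        if (!kv.find_slot(ubatch)) {
            return 1; // failure path: the guard restores the cache on scope exit
        }

        // ... build and compute the graph using the freshly reserved cells ...

        kv_guard.commit(); // success path: keep the pending slot ranges
    }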
examples/talk-llama/llama-memory.cpp (new file)
@@ -0,0 +1 @@
+#include "llama-memory.h"

examples/talk-llama/llama-memory.h (new file)
@@ -0,0 +1,21 @@
+#pragma once
+
+#include "llama.h"
+
+// general concept of LLM memory
+// the KV cache is a type of LLM memory, but there can be other types
+class llama_memory_i {
+public:
+    virtual void clear() = 0;
+    virtual void defrag() = 0;
+
+    virtual bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0;
+    virtual void seq_keep(llama_seq_id seq_id) = 0;
+    virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0;
+    virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0;
+
+    virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0;
+
+    virtual bool get_can_edit() const = 0;
+};
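To make the contract of the new interface concrete, here is a hedged sketch of a trivial implementation. The class name is hypothetical and exists only to show which methods an implementor must provide; in this change the KV cache is the memory type actually intended to fulfil this interface.

    #include "llama-memory.h"

    // hypothetical no-op memory, shown only to illustrate the llama_memory_i contract
    class llama_memory_null : public llama_memory_i {
    public:
        void clear()  override {}
        void defrag() override {}

        bool seq_rm  (llama_seq_id, llama_pos, llama_pos) override { return true; }
        void seq_cp  (llama_seq_id, llama_seq_id, llama_pos, llama_pos) override {}
        void seq_keep(llama_seq_id) override {}
        void seq_add (llama_seq_id, llama_pos, llama_pos, llama_pos) override {}
        void seq_div (llama_seq_id, llama_pos, llama_pos, int) override {}

        llama_pos seq_pos_max(llama_seq_id) const override { return -1; } // nothing stored

        bool get_can_edit() const override { return false; }
    };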
@@ -8,6 +8,7 @@
 #include <climits>
 #include <stdexcept>
 #include <cerrno>
+#include <algorithm>
 
 #ifdef __has_include
 #if __has_include(<unistd.h>)
@@ -34,6 +35,10 @@
 #include <io.h>
 #endif
 
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
 // TODO: consider moving to llama-impl.h if needed in more places
 #if defined(_WIN32)
 static std::string llama_format_win_err(DWORD err) {
@@ -471,7 +476,11 @@ struct llama_mlock::impl {
 
         char* errmsg = std::strerror(errno);
         bool suggest = (errno == ENOMEM);
+#if defined(TARGET_OS_VISION) || defined(TARGET_OS_TV) || defined(_AIX)
+        // visionOS/tvOS dont't support RLIMIT_MEMLOCK
+        // Skip resource limit checks on visionOS/tvOS
+        suggest = false;
+#else
         struct rlimit lock_limit;
         if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
             suggest = false;
@@ -479,6 +488,7 @@ struct llama_mlock::impl {
         if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
             suggest = false;
         }
+#endif
 
         LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                 size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");

@@ -1,5 +1,6 @@
 #pragma once
 
+#include <cstdint>
 #include <memory>
 #include <vector>
 

@@ -445,7 +445,8 @@ llama_model_loader::llama_model_loader(
         std::vector<std::string> & splits,
         bool use_mmap,
         bool check_tensors,
-        const struct llama_model_kv_override * param_overrides_p) {
+        const llama_model_kv_override * param_overrides_p,
+        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) {
     int trace = 0;
     if (getenv("LLAMA_TRACE")) {
         trace = atoi(getenv("LLAMA_TRACE"));
@@ -457,6 +458,8 @@ llama_model_loader::llama_model_loader(
         }
     }
 
+    tensor_buft_overrides = param_tensor_buft_overrides_p;
+
     // Load the main GGUF
     struct ggml_context * ctx = NULL;
     struct gguf_init_params params = {
@@ -600,7 +603,9 @@ llama_model_loader::llama_model_loader(
 
             if (trace > 0) {
                 const uint16_t sid = w.idx;
-                LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
+                LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ] %8.2f MiB\n", __func__,
+                        sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str(),
+                        ggml_nbytes(tensor)/1024.0f/1024.0f);
             }
         }
 
@@ -640,9 +645,9 @@ llama_model_loader::llama_model_loader(
         ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
 
         {
-            const int kid = gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV
-            if (kid >= 0) {
-                ftype = (llama_ftype) gguf_get_val_u32(meta.get(), kid);
+            uint32_t ftype_val = 0;
+            if (get_key(LLM_KV_GENERAL_FILE_TYPE, ftype_val, false)) {
+                ftype = (llama_ftype) ftype_val;
             }
         }
 
@@ -77,8 +77,9 @@ struct llama_model_loader {
 
     llama_mmaps mappings;
 
-    std::map<std::string, struct llama_tensor_weight, weight_name_comparer> weights_map;
-    std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
+    std::map<std::string, llama_tensor_weight, weight_name_comparer> weights_map;
+    std::unordered_map<std::string, llama_model_kv_override> kv_overrides;
+    const llama_model_tensor_buft_override * tensor_buft_overrides;
 
     gguf_context_ptr meta;
     std::vector<ggml_context_ptr> contexts;
@@ -95,7 +96,8 @@ struct llama_model_loader {
         std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
         bool use_mmap,
        bool check_tensors,
-        const struct llama_model_kv_override * param_overrides_p);
+        const llama_model_kv_override * param_overrides_p,
+        const llama_model_tensor_buft_override * param_tensor_buft_overrides_p);
 
     template<typename T>
     typename std::enable_if<std::is_integral<T>::value, bool>::type
(File diff suppressed because it is too large.)

@@ -2,7 +2,9 @@
 
 #include "llama.h"
 #include "llama-arch.h"
+#include "llama-graph.h"
 #include "llama-hparams.h"
+#include "llama-memory.h"
 #include "llama-vocab.h"
 
 #include <memory>
@@ -10,6 +12,8 @@
 #include <unordered_map>
 #include <vector>
 
+struct llama_cparams;
+struct llama_ubatch;
 struct llama_model_loader;
 
 // available models
@@ -25,6 +29,7 @@ enum llm_type {
     LLM_TYPE_109M,
     LLM_TYPE_137M,
     LLM_TYPE_160M,
+    LLM_TYPE_190M,
     LLM_TYPE_220M,
     LLM_TYPE_250M,
     LLM_TYPE_270M,
@@ -39,8 +44,10 @@ enum llm_type {
     LLM_TYPE_1_4B,
     LLM_TYPE_1_5B,
     LLM_TYPE_1_6B,
+    LLM_TYPE_1_8B,
     LLM_TYPE_2B,
     LLM_TYPE_2_8B,
+    LLM_TYPE_2_9B,
     LLM_TYPE_3B,
     LLM_TYPE_4B,
     LLM_TYPE_6B,
@@ -78,6 +85,9 @@ enum llm_type {
     LLM_TYPE_10B_128x3_66B,
     LLM_TYPE_57B_A14B,
     LLM_TYPE_27B,
+    LLM_TYPE_290B,
+    LLM_TYPE_17B_16E, // llama4 Scout
+    LLM_TYPE_17B_128E, // llama4 Maverick
 };
 
 struct llama_layer_posnet {
@@ -161,6 +171,8 @@ struct llama_layer {
     struct ggml_tensor * wq_b = nullptr;
     struct ggml_tensor * wkv_a_mqa = nullptr;
     struct ggml_tensor * wkv_b = nullptr;
+    struct ggml_tensor * wk_b = nullptr;
+    struct ggml_tensor * wv_b = nullptr;
     struct ggml_tensor * wq_cross = nullptr;
     struct ggml_tensor * wk_cross = nullptr;
     struct ggml_tensor * wv_cross = nullptr;
@@ -256,6 +268,20 @@ struct llama_layer {
     struct ggml_tensor * time_mix_receptance_b = nullptr;
     struct ggml_tensor * time_mix_gate = nullptr;
 
+    // rwkv7
+    struct ggml_tensor * time_mix_w0 = nullptr;
+    struct ggml_tensor * time_mix_a0 = nullptr;
+    struct ggml_tensor * time_mix_a1 = nullptr;
+    struct ggml_tensor * time_mix_a2 = nullptr;
+    struct ggml_tensor * time_mix_v0 = nullptr;
+    struct ggml_tensor * time_mix_v1 = nullptr;
+    struct ggml_tensor * time_mix_v2 = nullptr;
+    struct ggml_tensor * time_mix_g1 = nullptr;
+    struct ggml_tensor * time_mix_g2 = nullptr;
+    struct ggml_tensor * time_mix_k_k = nullptr;
+    struct ggml_tensor * time_mix_k_a = nullptr;
+    struct ggml_tensor * time_mix_r_k = nullptr;
+
     struct ggml_tensor * time_mix_ln = nullptr;
     struct ggml_tensor * time_mix_ln_b = nullptr;
     struct ggml_tensor * time_mix_output = nullptr;
@@ -347,7 +373,7 @@ struct llama_model {
     std::string desc() const;
 
     size_t size() const;
-    size_t max_nodes() const;
+    size_t n_tensors() const;
     size_t n_devices() const;
 
     // total number of parameters in the model
@@ -360,11 +386,26 @@ struct llama_model {
 
     ggml_backend_buffer_type_t select_buft(int il) const;
 
+    bool has_tensor_overrides() const;
+
     const struct ggml_tensor * get_tensor(const char * name) const;
 
+    // TODO: move this to new llm_arch_model_i interface
+    llama_memory_i * create_memory() const; // TODO: params
+
+    // TODO: move this to new llm_arch_model_i interface
+    llm_graph_result_ptr build_graph(
+            const llm_graph_params & params,
+                       ggml_cgraph * gf,
+                    llm_graph_type   type) const;
+
 private:
     struct impl;
     std::unique_ptr<impl> pimpl;
 };
 
 const char * llm_type_name(llm_type type);
+
+// For internal test use
+// TODO: remove
+const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model);
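A rough sketch of how the new create_memory() accessor could be driven through the llama_memory_i interface declared earlier. This is a hypothetical caller-side fragment: `model` is assumed to be a loaded llama_model, and ownership/lifetime of the returned pointer is deliberately glossed over.

    // obtain the model's memory object and manipulate it via the generic interface
    llama_memory_i * mem = model.create_memory();

    mem->clear();                 // drop all cached state
    mem->seq_keep(0);             // keep only sequence 0
    mem->seq_add(0, 0, -1, 4);    // shift positions of sequence 0 by +4

    const llama_pos p_max = mem->seq_pos_max(0);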
@@ -10,6 +10,7 @@
 #include <cinttypes>
 #include <fstream>
 #include <mutex>
+#include <regex>
 #include <thread>
 #include <unordered_map>
 
@@ -47,8 +48,14 @@ struct quantize_state_impl {
     {}
 };
 
+// changes to this struct must be replicated in quantize.cpp
+struct tensor_quantization {
+    std::string name;
+    ggml_type quant = GGML_TYPE_COUNT;
+};
+
 static void llama_tensor_dequantize_impl(
-    struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
+    ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
     const size_t nelements, const int nthread
 ) {
     if (output.size() < nelements) {
@@ -527,7 +534,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     }
 
     std::vector<std::string> splits = {};
-    llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides);
+    llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides, nullptr);
     ml.init_mappings(false); // no prefetching
 
     llama_model model(llama_model_default_params());
@@ -536,7 +543,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     model.load_hparams(ml);
     model.load_stats (ml);
 
-    struct quantize_state_impl qs(model, params);
+    quantize_state_impl qs(model, params);
 
     if (params->only_copy) {
         ftype = ml.ftype;
@@ -661,7 +668,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     // populate the original tensors so we get an initial meta data
     for (const auto * it : tensors) {
         uint16_t i_split = params->keep_split ? it->idx : 0;
-        struct ggml_tensor * tensor = it->tensor;
+        ggml_tensor * tensor = it->tensor;
         if (!ctx_outs[i_split]) {
             ctx_outs[i_split].reset(gguf_init_empty());
         }
@@ -710,7 +717,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     new_ofstream(0);
     for (const auto * it : tensors) {
         const auto & weight = *it;
-        struct ggml_tensor * tensor = weight.tensor;
+        ggml_tensor * tensor = weight.tensor;
         if (weight.idx != cur_split && params->keep_split) {
             close_ofstream();
             new_ofstream(weight.idx);
@@ -756,10 +763,19 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         // NOTE: can't use LLM_TN here because the layer number is not known
         quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
 
-        // do not quantize RWKV's time_mix_first tensors
+        // do not quantize RWKV's small yet 2D weights
         quantize &= name.find("time_mix_first.weight") == std::string::npos;
+        quantize &= name.find("time_mix_w0.weight") == std::string::npos;
         quantize &= name.find("time_mix_w1.weight") == std::string::npos;
         quantize &= name.find("time_mix_w2.weight") == std::string::npos;
+        quantize &= name.find("time_mix_v0.weight") == std::string::npos;
+        quantize &= name.find("time_mix_v1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_v2.weight") == std::string::npos;
+        quantize &= name.find("time_mix_a0.weight") == std::string::npos;
+        quantize &= name.find("time_mix_a1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_a2.weight") == std::string::npos;
+        quantize &= name.find("time_mix_g1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_g2.weight") == std::string::npos;
         quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos;
         quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos;
         quantize &= name.find("time_mix_lerp_fused.weight") == std::string::npos;
@@ -767,7 +783,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         // do not quantize relative position bias (T5)
         quantize &= name.find("attn_rel_b.weight") == std::string::npos;
 
-        enum ggml_type new_type;
+        ggml_type new_type;
         void * new_data;
         size_t new_size;
 
@@ -777,6 +793,19 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             // get more optimal quantization type based on the tensor shape, layer, etc.
             if (!params->pure && ggml_is_quantized(default_type)) {
                 new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
+                // unless the user specifies a type
+                if (params->tensor_types) {
+                    const std::vector<tensor_quantization> & tensor_types = *static_cast<const std::vector<tensor_quantization> *>(params->tensor_types);
+                    for (const auto & [tname, qtype] : tensor_types) {
+                        if (std::regex pattern(tname); std::regex_search(tensor->name, pattern)) {
+                            if (qtype != new_type) {
+                                LLAMA_LOG_DEBUG("(overriding %s -> %s), ", ggml_type_name(new_type), ggml_type_name(qtype));
+                            }
+                            new_type = qtype;
+                            break;
+                        }
+                    }
+                }
             }
             if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
                 new_type = params->token_embedding_type;
@@ -901,8 +930,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
 // interface implementation
 //
 
-struct llama_model_quantize_params llama_model_quantize_default_params() {
-    struct llama_model_quantize_params result = {
+llama_model_quantize_params llama_model_quantize_default_params() {
+    llama_model_quantize_params result = {
         /*.nthread                     =*/ 0,
         /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
         /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
@@ -914,6 +943,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.keep_split                  =*/ false,
        /*.imatrix                     =*/ nullptr,
        /*.kv_overrides                =*/ nullptr,
+        /*.tensor_type                 =*/ nullptr,
     };
 
     return result;
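To make the new per-tensor override path above concrete, a hedged caller-side sketch follows. The tensor_quantization struct and the params->tensor_types accessor come from the diff above; the specific regex patterns, target types, and the exact public field name on llama_model_quantize_params are assumptions for illustration only.

    // hypothetical setup for per-tensor quantization type overrides
    std::vector<tensor_quantization> tensor_types = {
        { "ffn_down",        GGML_TYPE_Q6_K }, // regex matched against tensor names
        { "attn_v\\.weight", GGML_TYPE_Q8_0 },
    };

    llama_model_quantize_params qparams = llama_model_quantize_default_params();
    qparams.ftype        = LLAMA_FTYPE_MOSTLY_Q4_K_M;
    qparams.tensor_types = &tensor_types; // consumed via static_cast in the override loop above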
@ -316,6 +316,13 @@ static uint32_t get_rng_seed(uint32_t seed) {
|
|||||||
|
|
||||||
// llama_sampler API
|
// llama_sampler API
|
||||||
|
|
||||||
|
struct llama_sampler * llama_sampler_init(const struct llama_sampler_i * iface, llama_sampler_context_t ctx) {
|
||||||
|
return new llama_sampler {
|
||||||
|
/* .iface = */ iface,
|
||||||
|
/* .ctx = */ ctx,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
const char * llama_sampler_name(const struct llama_sampler * smpl) {
|
const char * llama_sampler_name(const struct llama_sampler * smpl) {
|
||||||
if (!smpl->iface) {
|
if (!smpl->iface) {
|
||||||
return "(null)";
|
return "(null)";
|
||||||
@ -347,10 +354,10 @@ struct llama_sampler * llama_sampler_clone(const struct llama_sampler * smpl) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (smpl->ctx == nullptr) {
|
if (smpl->ctx == nullptr) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ smpl->iface,
|
/* .iface = */ smpl->iface,
|
||||||
/* .ctx = */ nullptr,
|
/* .ctx = */ nullptr
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
GGML_ABORT("the sampler does not support cloning");
|
GGML_ABORT("the sampler does not support cloning");
|
||||||
@ -472,15 +479,15 @@ static struct llama_sampler_i llama_sampler_chain_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params) {
|
struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_chain_i,
|
/* .iface = */ &llama_sampler_chain_i,
|
||||||
/* .ctx = */ new llama_sampler_chain {
|
/* .ctx = */ new llama_sampler_chain {
|
||||||
/* .params = */ params,
|
/* .params = */ params,
|
||||||
/* .samplers = */ {},
|
/* .samplers = */ {},
|
||||||
/* .t_sample_us = */ 0,
|
/* .t_sample_us = */ 0,
|
||||||
/* .n_sample = */ 0,
|
/* .n_sample = */ 0,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
void llama_sampler_chain_add(struct llama_sampler * chain, struct llama_sampler * smpl) {
|
void llama_sampler_chain_add(struct llama_sampler * chain, struct llama_sampler * smpl) {
|
||||||
@ -546,10 +553,10 @@ static struct llama_sampler_i llama_sampler_greedy_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_greedy() {
|
struct llama_sampler * llama_sampler_init_greedy() {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_greedy_i,
|
/* .iface = */ &llama_sampler_greedy_i,
|
||||||
/* .ctx = */ nullptr,
|
/* .ctx = */ nullptr
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// dist
|
// dist
|
||||||
@ -608,14 +615,14 @@ static struct llama_sampler_i llama_sampler_dist_i = {
|
|||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_dist(uint32_t seed) {
|
struct llama_sampler * llama_sampler_init_dist(uint32_t seed) {
|
||||||
auto seed_cur = get_rng_seed(seed);
|
auto seed_cur = get_rng_seed(seed);
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_dist_i,
|
/* .iface = */ &llama_sampler_dist_i,
|
||||||
/* .ctx = */ new llama_sampler_dist {
|
/* .ctx = */ new llama_sampler_dist {
|
||||||
/* .seed = */ seed,
|
/* .seed = */ seed,
|
||||||
/* .seed_cur = */ seed_cur,
|
/* .seed_cur = */ seed_cur,
|
||||||
/* .rng = */ std::mt19937(seed_cur),
|
/* .rng = */ std::mt19937(seed_cur),
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// softmax
|
// softmax
|
||||||
@ -638,10 +645,10 @@ static struct llama_sampler_i llama_sampler_softmax_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_softmax() {
|
struct llama_sampler * llama_sampler_init_softmax() {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_softmax_i,
|
/* .iface = */ &llama_sampler_softmax_i,
|
||||||
/* .ctx = */ nullptr,
|
/* .ctx = */ nullptr
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// top-k
|
// top-k
|
||||||
@ -678,12 +685,12 @@ static struct llama_sampler_i llama_sampler_top_k_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_top_k(int32_t k) {
|
struct llama_sampler * llama_sampler_init_top_k(int32_t k) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_top_k_i,
|
/* .iface = */ &llama_sampler_top_k_i,
|
||||||
/* .ctx = */ new llama_sampler_top_k {
|
/* .ctx = */ new llama_sampler_top_k {
|
||||||
/* .k = */ k,
|
/* .k = */ k,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// top-p
|
// top-p
|
||||||
@ -744,13 +751,13 @@ static struct llama_sampler_i llama_sampler_top_p_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_top_p(float p, size_t min_keep) {
|
struct llama_sampler * llama_sampler_init_top_p(float p, size_t min_keep) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_top_p_i,
|
/* .iface = */ &llama_sampler_top_p_i,
|
||||||
/* .ctx = */ new llama_sampler_top_p {
|
/* .ctx = */ new llama_sampler_top_p {
|
||||||
/* .p = */ p,
|
/* .p = */ p,
|
||||||
/* .min_keep = */ min_keep,
|
/* .min_keep = */ min_keep,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// min-p
|
// min-p
|
||||||
@ -840,13 +847,13 @@ static struct llama_sampler_i llama_sampler_min_p_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_min_p(float p, size_t min_keep) {
|
struct llama_sampler * llama_sampler_init_min_p(float p, size_t min_keep) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_min_p_i,
|
/* .iface = */ &llama_sampler_min_p_i,
|
||||||
/* .ctx = */ new llama_sampler_min_p {
|
/* .ctx = */ new llama_sampler_min_p {
|
||||||
/* .p = */ p,
|
/* .p = */ p,
|
||||||
/* .min_keep = */ min_keep,
|
/* .min_keep = */ min_keep,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// typical
|
// typical
|
||||||
@ -939,13 +946,13 @@ static struct llama_sampler_i llama_sampler_typical_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_typical(float p, size_t min_keep) {
|
struct llama_sampler * llama_sampler_init_typical(float p, size_t min_keep) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_typical_i,
|
/* .iface = */ &llama_sampler_typical_i,
|
||||||
/* .ctx = */ new llama_sampler_typical {
|
/* .ctx = */ new llama_sampler_typical {
|
||||||
/* .p = */ p,
|
/* .p = */ p,
|
||||||
/* .min_keep = */ min_keep,
|
/* .min_keep = */ min_keep,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// temp
|
// temp
|
||||||
@ -983,12 +990,12 @@ static struct llama_sampler_i llama_sampler_temp_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_temp(float temp) {
|
struct llama_sampler * llama_sampler_init_temp(float temp) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_temp_i,
|
/* .iface = */ &llama_sampler_temp_i,
|
||||||
/* .ctx = */ new llama_sampler_temp {
|
/* .ctx = */ new llama_sampler_temp {
|
||||||
/*.temp = */ temp,
|
/*.temp = */ temp,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// temp-ext
|
// temp-ext
|
||||||
@ -1093,14 +1100,14 @@ static struct llama_sampler_i llama_sampler_temp_ext_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_temp_ext(float temp, float delta, float exponent) {
|
struct llama_sampler * llama_sampler_init_temp_ext(float temp, float delta, float exponent) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_temp_ext_i,
|
/* .iface = */ &llama_sampler_temp_ext_i,
|
||||||
/* .ctx = */ new llama_sampler_temp_ext {
|
/* .ctx = */ new llama_sampler_temp_ext {
|
||||||
/* .temp = */ temp,
|
/* .temp = */ temp,
|
||||||
/* .delta = */ delta,
|
/* .delta = */ delta,
|
||||||
/* .exponent = */ exponent,
|
/* .exponent = */ exponent,
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// xtc
|
// xtc
|
||||||
@ -1185,7 +1192,7 @@ static struct llama_sampler_i llama_sampler_xtc_i = {
|
|||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_xtc(float p, float t, size_t min_keep, uint32_t seed) {
|
struct llama_sampler * llama_sampler_init_xtc(float p, float t, size_t min_keep, uint32_t seed) {
|
||||||
auto seed_cur = get_rng_seed(seed);
|
auto seed_cur = get_rng_seed(seed);
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_xtc_i,
|
/* .iface = */ &llama_sampler_xtc_i,
|
||||||
/* .ctx = */ new llama_sampler_xtc {
|
/* .ctx = */ new llama_sampler_xtc {
|
||||||
/* .probability = */ p,
|
/* .probability = */ p,
|
||||||
@ -1194,8 +1201,8 @@ struct llama_sampler * llama_sampler_init_xtc(float p, float t, size_t min_keep,
|
|||||||
/* .seed = */ seed,
|
/* .seed = */ seed,
|
||||||
/* .seed_cur = */ seed_cur,
|
/* .seed_cur = */ seed_cur,
|
||||||
/* .rng = */ std::mt19937(seed_cur),
|
/* .rng = */ std::mt19937(seed_cur),
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// mirostat
|
// mirostat
|
||||||
@ -1292,7 +1299,7 @@ static struct llama_sampler_i llama_sampler_mirostat_i = {
|
|||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_mirostat(int32_t n_vocab, uint32_t seed, float tau, float eta, int32_t m) {
|
struct llama_sampler * llama_sampler_init_mirostat(int32_t n_vocab, uint32_t seed, float tau, float eta, int32_t m) {
|
||||||
auto seed_cur = get_rng_seed(seed);
|
auto seed_cur = get_rng_seed(seed);
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_mirostat_i,
|
/* .iface = */ &llama_sampler_mirostat_i,
|
||||||
/* .ctx = */ new llama_sampler_mirostat {
|
/* .ctx = */ new llama_sampler_mirostat {
|
||||||
/* .n_vocab = */ n_vocab,
|
/* .n_vocab = */ n_vocab,
|
||||||
@ -1303,8 +1310,8 @@ struct llama_sampler * llama_sampler_init_mirostat(int32_t n_vocab, uint32_t see
|
|||||||
/* .m = */ m,
|
/* .m = */ m,
|
||||||
/* .mu = */ 2.0f*tau,
|
/* .mu = */ 2.0f*tau,
|
||||||
/* .rng = */ std::mt19937(seed_cur),
|
/* .rng = */ std::mt19937(seed_cur),
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// mirostat v2
|
// mirostat v2
|
||||||
@ -1391,7 +1398,7 @@ static struct llama_sampler_i llama_sampler_mirostat_v2_i = {
|
|||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau, float eta) {
|
struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau, float eta) {
|
||||||
auto seed_cur = get_rng_seed(seed);
|
auto seed_cur = get_rng_seed(seed);
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_mirostat_v2_i,
|
/* .iface = */ &llama_sampler_mirostat_v2_i,
|
||||||
/* .ctx = */ new llama_sampler_mirostat_v2 {
|
/* .ctx = */ new llama_sampler_mirostat_v2 {
|
||||||
/* .seed = */ seed,
|
/* .seed = */ seed,
|
||||||
@ -1400,8 +1407,8 @@ struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau,
|
|||||||
/* .eta = */ eta,
|
/* .eta = */ eta,
|
||||||
/* .mu = */ 2.0f*tau,
|
/* .mu = */ 2.0f*tau,
|
||||||
/* .rng = */ std::mt19937(seed_cur),
|
/* .rng = */ std::mt19937(seed_cur),
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// grammar
|
// grammar
|
||||||
@ -1442,7 +1449,9 @@ static struct llama_sampler * llama_sampler_init_grammar_impl(
|
|||||||
const char ** trigger_words,
|
const char ** trigger_words,
|
||||||
size_t num_trigger_words,
|
size_t num_trigger_words,
|
||||||
const llama_token * trigger_tokens,
|
const llama_token * trigger_tokens,
|
||||||
size_t num_trigger_tokens);
|
size_t num_trigger_tokens,
|
||||||
|
const char ** trigger_patterns,
|
||||||
|
size_t num_trigger_patterns);
|
||||||
|
|
||||||
static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
|
static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
|
||||||
auto * ctx = (llama_sampler_grammar *) smpl->ctx;
|
auto * ctx = (llama_sampler_grammar *) smpl->ctx;
|
||||||
@ -1450,12 +1459,14 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<const char *> trigger_words;
|
std::vector<const char *> trigger_patterns_c;
|
||||||
for (auto & word : ctx->grammar->trigger_words) {
|
trigger_patterns_c.reserve(ctx->grammar->trigger_patterns.size());
|
||||||
trigger_words.push_back(word.c_str());
|
for (auto & trigger_pattern : ctx->grammar->trigger_patterns) {
|
||||||
|
trigger_patterns_c.push_back(trigger_pattern.pattern.c_str());
|
||||||
}
|
}
|
||||||
|
|
||||||
auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
|
auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
|
||||||
ctx->grammar->lazy, trigger_words.data(), trigger_words.size(),
|
ctx->grammar->lazy, trigger_patterns_c.data(), trigger_patterns_c.size(),
|
||||||
ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());
|
ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());
|
||||||
|
|
||||||
llama_grammar_free_impl(ctx->grammar);
|
llama_grammar_free_impl(ctx->grammar);
|
||||||
@ -1465,7 +1476,8 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
|
|||||||
static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) {
|
static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) {
|
||||||
const auto * ctx = (const llama_sampler_grammar *) smpl->ctx;
|
const auto * ctx = (const llama_sampler_grammar *) smpl->ctx;
|
||||||
|
|
||||||
auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0);
|
auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0, nullptr, 0);
|
||||||
|
GGML_ASSERT(result);
|
||||||
|
|
||||||
// copy the state
|
// copy the state
|
||||||
{
|
{
|
||||||
@ -1509,16 +1521,38 @@ static struct llama_sampler * llama_sampler_init_grammar_impl(
|
|||||||
const char ** trigger_words,
|
const char ** trigger_words,
|
||||||
size_t num_trigger_words,
|
size_t num_trigger_words,
|
||||||
const llama_token * trigger_tokens,
|
const llama_token * trigger_tokens,
|
||||||
size_t num_trigger_tokens) {
|
size_t num_trigger_tokens,
|
||||||
|
const char ** trigger_patterns,
|
||||||
|
size_t num_trigger_patterns) {
|
||||||
auto * ctx = new llama_sampler_grammar;
|
auto * ctx = new llama_sampler_grammar;
|
||||||
|
|
||||||
if (grammar_str != nullptr && grammar_str[0] != '\0') {
|
if (grammar_str != nullptr && grammar_str[0] != '\0') {
|
||||||
|
// TODO: remove trigger_words support.
|
||||||
|
if (trigger_words != nullptr && num_trigger_words > 0) {
|
||||||
|
GGML_ASSERT(trigger_patterns == nullptr && num_trigger_patterns == 0);
|
||||||
|
std::string trigger_pattern("[\\s\\S]*?(");
|
||||||
|
for (size_t i = 0; i < num_trigger_words; ++i) {
|
||||||
|
static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
|
||||||
|
if (i > 0) {
|
||||||
|
trigger_pattern += "|";
|
||||||
|
}
|
||||||
|
trigger_pattern += std::regex_replace(trigger_words[i], special_chars, "\\$0");
|
||||||
|
}
|
||||||
|
trigger_pattern += ")[\\s\\S]*";
|
||||||
|
auto trigger_pattern_c = trigger_pattern.c_str();
|
||||||
|
trigger_patterns = &trigger_pattern_c;
|
||||||
|
num_trigger_patterns = 1;
|
||||||
|
}
|
||||||
*ctx = {
|
*ctx = {
|
||||||
/* .vocab = */ vocab,
|
/* .vocab = */ vocab,
|
||||||
/* .grammar_str = */ grammar_str,
|
/* .grammar_str = */ grammar_str,
|
||||||
/* .grammar_root = */ grammar_root,
|
/* .grammar_root = */ grammar_root,
|
||||||
/* .grammar = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens),
|
/* .grammar = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens),
|
||||||
};
|
};
|
||||||
|
if (!ctx->grammar) {
|
||||||
|
delete ctx;
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
*ctx = {
|
*ctx = {
|
||||||
/* .vocab = */ vocab,
|
/* .vocab = */ vocab,
|
||||||
@ -1528,17 +1562,17 @@ static struct llama_sampler * llama_sampler_init_grammar_impl(
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_grammar_i,
|
/* .iface = */ &llama_sampler_grammar_i,
|
||||||
/* .ctx = */ ctx,
|
/* .ctx = */ ctx
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_grammar(
|
struct llama_sampler * llama_sampler_init_grammar(
|
||||||
const struct llama_vocab * vocab,
|
const struct llama_vocab * vocab,
|
||||||
const char * grammar_str,
|
const char * grammar_str,
|
||||||
const char * grammar_root) {
|
const char * grammar_root) {
|
||||||
return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ false, nullptr, 0, nullptr, 0);
|
return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ false, nullptr, 0, nullptr, 0, nullptr, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_grammar_lazy(
|
struct llama_sampler * llama_sampler_init_grammar_lazy(
|
||||||
@ -1549,7 +1583,18 @@ struct llama_sampler * llama_sampler_init_grammar_lazy(
|
|||||||
size_t num_trigger_words,
|
size_t num_trigger_words,
|
||||||
const llama_token * trigger_tokens,
|
const llama_token * trigger_tokens,
|
||||||
size_t num_trigger_tokens) {
|
size_t num_trigger_tokens) {
|
||||||
return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens);
|
return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens, nullptr, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct llama_sampler * llama_sampler_init_grammar_lazy_patterns(
|
||||||
|
const struct llama_vocab * vocab,
|
||||||
|
const char * grammar_str,
|
||||||
|
const char * grammar_root,
|
||||||
|
const char ** trigger_patterns,
|
||||||
|
size_t num_trigger_patterns,
|
||||||
|
const llama_token * trigger_tokens,
|
||||||
|
size_t num_trigger_tokens) {
|
||||||
|
return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, nullptr, 0, trigger_tokens, num_trigger_tokens, trigger_patterns, num_trigger_patterns);
|
||||||
}
|
}
|
||||||
|
|
||||||
// penalties
|
// penalties
|
||||||
@ -1678,7 +1723,7 @@ struct llama_sampler * llama_sampler_init_penalties(
|
|||||||
float penalty_present) {
|
float penalty_present) {
|
||||||
penalty_last_n = std::max(penalty_last_n, 0);
|
penalty_last_n = std::max(penalty_last_n, 0);
|
||||||
|
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_penalties_i,
|
/* .iface = */ &llama_sampler_penalties_i,
|
||||||
/* .ctx = */ new llama_sampler_penalties {
|
/* .ctx = */ new llama_sampler_penalties {
|
||||||
/* .penalty_last_n = */ penalty_last_n,
|
/* .penalty_last_n = */ penalty_last_n,
|
||||||
@ -1687,8 +1732,75 @@ struct llama_sampler * llama_sampler_init_penalties(
|
|||||||
/* .penalty_present = */ penalty_present,
|
/* .penalty_present = */ penalty_present,
|
||||||
/* .prev = */ ring_buffer<llama_token>(penalty_last_n),
|
/* .prev = */ ring_buffer<llama_token>(penalty_last_n),
|
||||||
/* .token_count = */ {},
|
/* .token_count = */ {},
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// top-n-sigma
|
||||||
|
|
||||||
|
struct llama_sampler_top_n_sigma {
|
||||||
|
const float n;
|
||||||
|
};
|
||||||
|
|
||||||
|
static const char * llama_sampler_top_n_sigma_name(const struct llama_sampler * /*smpl*/) {
|
||||||
|
return "top-n-sigma";
|
||||||
|
}
|
||||||
|
|
||||||
|
static void llama_sampler_top_n_sigma_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
|
||||||
|
const auto * ctx = (llama_sampler_top_n_sigma *) smpl->ctx;
|
||||||
|
|
||||||
|
// find max logit and calculate mean
|
||||||
|
float max = cur_p->data[0].logit;
|
||||||
|
float logits_sum = 0;
|
||||||
|
for (size_t i = 0; i < cur_p->size; ++i) {
|
||||||
|
if (cur_p->data[i].logit > max) {
|
||||||
|
max = cur_p->data[i].logit;
|
||||||
|
}
|
||||||
|
logits_sum += cur_p->data[i].logit;
|
||||||
|
}
|
||||||
|
float mean = logits_sum/cur_p->size;
|
||||||
|
|
||||||
|
// calculate standard deviation
|
||||||
|
float acc = 0;
|
||||||
|
for (size_t i = 0; i < cur_p->size; ++i) {
|
||||||
|
acc += pow(cur_p->data[i].logit - mean, 2);
|
||||||
|
}
|
||||||
|
float std = sqrt(acc/cur_p->size);
|
||||||
|
|
||||||
|
//apply mask
|
||||||
|
for (size_t i = 0; i < cur_p->size; ++i) {
|
||||||
|
if (cur_p->data[i].logit < max - (ctx->n * std)) {
|
||||||
|
cur_p->data[i].logit = -INFINITY;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
llama_sampler_softmax_impl(cur_p);
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct llama_sampler * llama_sampler_top_n_sigma_clone(const struct llama_sampler * smpl) {
|
||||||
|
const auto * ctx = (const llama_sampler_top_n_sigma *) smpl->ctx;
|
||||||
|
return llama_sampler_init_top_n_sigma(ctx->n);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void llama_sampler_top_n_sigma_free(struct llama_sampler * smpl) {
|
||||||
|
delete (llama_sampler_top_n_sigma *) smpl->ctx;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct llama_sampler_i llama_sampler_top_n_sigma_i = {
|
||||||
|
/* .name = */ llama_sampler_top_n_sigma_name,
|
||||||
|
/* .accept = */ nullptr,
|
||||||
|
/* .apply = */ llama_sampler_top_n_sigma_apply,
|
||||||
|
/* .reset = */ nullptr,
|
||||||
|
/* .clone = */ llama_sampler_top_n_sigma_clone,
|
||||||
|
/* .free = */ llama_sampler_top_n_sigma_free,
|
||||||
|
};
|
||||||
|
|
||||||
|
struct llama_sampler * llama_sampler_init_top_n_sigma(float n) {
|
||||||
|
return llama_sampler_init(
|
||||||
|
/* .iface = */ &llama_sampler_top_n_sigma_i,
|
||||||
|
/* .ctx = */ new llama_sampler_top_n_sigma {
|
||||||
|
/* .n = */ n,
|
||||||
|
}
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// DRY
|
// DRY
|
||||||
@ -2041,7 +2153,7 @@ struct llama_sampler * llama_sampler_init_dry(const struct llama_vocab * vocab,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_dry_i,
|
/* .iface = */ &llama_sampler_dry_i,
|
||||||
/* .ctx = */ new llama_sampler_dry {
|
/* .ctx = */ new llama_sampler_dry {
|
||||||
/* .total_context_size = */ context_size,
|
/* .total_context_size = */ context_size,
|
||||||
@ -2053,8 +2165,8 @@ struct llama_sampler * llama_sampler_init_dry(const struct llama_vocab * vocab,
|
|||||||
/* .dry_repeat_count = */ dry_enabled ? std::vector<int>(effective_dry_penalty_last_n, 0) : std::vector<int>{},
|
/* .dry_repeat_count = */ dry_enabled ? std::vector<int>(effective_dry_penalty_last_n, 0) : std::vector<int>{},
|
||||||
/* .dry_max_token_repeat = */ {},
|
/* .dry_max_token_repeat = */ {},
|
||||||
/* .last_tokens = */ dry_enabled ? ring_buffer<llama_token>(effective_dry_penalty_last_n) : ring_buffer<llama_token>(0),
|
/* .last_tokens = */ dry_enabled ? ring_buffer<llama_token>(effective_dry_penalty_last_n) : ring_buffer<llama_token>(0),
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// wrapper for test-sampling.cpp
|
// wrapper for test-sampling.cpp
|
||||||
@ -2155,14 +2267,14 @@ struct llama_sampler * llama_sampler_init_logit_bias(
|
|||||||
int32_t n_vocab,
|
int32_t n_vocab,
|
||||||
int32_t n_logit_bias,
|
int32_t n_logit_bias,
|
||||||
const llama_logit_bias * logit_bias) {
|
const llama_logit_bias * logit_bias) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_logit_bias_i,
|
/* .iface = */ &llama_sampler_logit_bias_i,
|
||||||
/* .ctx = */ new llama_sampler_logit_bias {
|
/* .ctx = */ new llama_sampler_logit_bias {
|
||||||
/* .n_vocab = */ n_vocab,
|
/* .n_vocab = */ n_vocab,
|
||||||
/* .logit_bias = */ std::vector<llama_logit_bias>(logit_bias, logit_bias + n_logit_bias),
|
/* .logit_bias = */ std::vector<llama_logit_bias>(logit_bias, logit_bias + n_logit_bias),
|
||||||
/* .to_search = */ {},
|
/* .to_search = */ {},
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// infill
|
// infill
|
||||||
@ -2377,14 +2489,14 @@ static struct llama_sampler_i llama_sampler_infill_i = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab) {
|
struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab) {
|
||||||
return new llama_sampler {
|
return llama_sampler_init(
|
||||||
/* .iface = */ &llama_sampler_infill_i,
|
/* .iface = */ &llama_sampler_infill_i,
|
||||||
/* .ctx = */ new llama_sampler_infill {
|
/* .ctx = */ new llama_sampler_infill {
|
||||||
/* .vocab = */ vocab,
|
/* .vocab = */ vocab,
|
||||||
/* .buf0 = */ std::vector<char>(512),
|
/* .buf0 = */ std::vector<char>(512),
|
||||||
/* .buf1 = */ std::vector<char>(512),
|
/* .buf1 = */ std::vector<char>(512),
|
||||||
},
|
}
|
||||||
};
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// utils
|
// utils
|
||||||
@@ -16,6 +16,7 @@
 #include <queue>
 #include <set>
 #include <unordered_map>
+#include <cctype>
 
 //
 // helpers
@@ -341,6 +342,7 @@ struct llm_tokenizer_bpe : llm_tokenizer {
             case LLAMA_VOCAB_PRE_TYPE_MPT:
             case LLAMA_VOCAB_PRE_TYPE_OLMO:
             case LLAMA_VOCAB_PRE_TYPE_JAIS:
+            case LLAMA_VOCAB_PRE_TYPE_TRILLION:
                 regex_exprs = {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 };
@@ -392,6 +394,27 @@ struct llm_tokenizer_bpe : llm_tokenizer {
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
                 };
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_GPT4O:
+                regex_exprs = {
+                    // original regex from tokenizer.json
+                    // "[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+                    "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+                };
+                break;
+            case LLAMA_VOCAB_PRE_TYPE_SUPERBPE:
+                regex_exprs = {
+                    "\\p{N}+",
+                    "(?=(\\d{3})+(?!\\d))",
+                };
+                break;
+            case LLAMA_VOCAB_PRE_TYPE_BAILINGMOE:
+                regex_exprs = {
+                    // original regex from tokenizer.json
+                    // "'(?i:[sdmt]|ll|ve|re)|[^\\r\\n\\p{L}\\p{N}]?+\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]++[\\r\\n]*|\\s*[\\r\\n]|\\s+(?!\\S)|\\s+"
                    // FIXME? Changed possessive quantifiers (?+ and ++) to greedy to avoid errors and imatrix hanging (tried atomic grouping but it's not supported?)
+                    "'(?:[sSdDmMtT]|[lL][lL]|[vV][eE]|[rR][eE])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]|\\s+(?!\\S)|\\s+",
+                };
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 regex_exprs = {
@@ -1483,7 +1506,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 tokenizer_pre == "llama3" ||
                 tokenizer_pre == "llama-v3" ||
                 tokenizer_pre == "llama-bpe"||
-                tokenizer_pre == "falcon3") {
+                tokenizer_pre == "falcon3" ||
+                tokenizer_pre == "pixtral") {
             pre_type = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
             ignore_merges = true;
             add_bos = true;
@@ -1549,6 +1573,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             pre_type = LLAMA_VOCAB_PRE_TYPE_PORO;
             clean_spaces = false;
         } else if (
+                tokenizer_pre == "glm4" ||
                 tokenizer_pre == "chatglm-bpe") {
             pre_type = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
             special_bos_id = LLAMA_TOKEN_NULL;
@@ -1592,6 +1617,23 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         } else if (
                 tokenizer_pre == "megrez") {
             pre_type = LLAMA_VOCAB_PRE_TYPE_QWEN2;
+        } else if (
+                tokenizer_pre == "gpt-4o" ||
+                tokenizer_pre == "llama4") {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_GPT4O;
+            clean_spaces = false;
+        } else if (
+                tokenizer_pre == "superbpe") {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_SUPERBPE;
+            clean_spaces = false;
+        } else if (
+                tokenizer_pre == "trillion") {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_TRILLION;
+            clean_spaces = false;
+        } else if (
+                tokenizer_pre == "bailingmoe") {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_BAILINGMOE;
+            clean_spaces = false;
         } else {
             throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
         }
@@ -1769,6 +1811,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == "<end_of_turn>"
                         || t.first == "<|endoftext|>"
                         || t.first == "<EOT>"
+                        || t.first == "_<EOT>"
                         || t.first == "<|end▁of▁sentence|>" // DeepSeek
                         ) {
                     special_eot_id = t.second;
@@ -1799,8 +1842,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 if (false
                         || t.first == "<|fim_prefix|>" // Qwen
                         || t.first == "<fim-prefix>"
+                        || t.first == "<fim_prefix>" // Granite
                         || t.first == "<|fim▁begin|>" // DeepSeek
                         || t.first == "<PRE>"
+                        || t.first == "▁<PRE>" // CodeLlama
                         ) {
                     special_fim_pre_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1816,8 +1861,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 if (false
                         || t.first == "<|fim_suffix|>" // Qwen
                         || t.first == "<fim-suffix>"
+                        || t.first == "<fim_suffix>" // Granite
                         || t.first == "<|fim▁hole|>" // DeepSeek
                         || t.first == "<SUF>"
+                        || t.first == "▁<SUF>" // CodeLlama
                         ) {
                     special_fim_suf_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1833,8 +1880,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 if (false
                         || t.first == "<|fim_middle|>" // Qwen
                         || t.first == "<fim-middle>"
+                        || t.first == "<fim_middle>" // Granite
                         || t.first == "<|fim▁end|>" // DeepSeek
                         || t.first == "<MID>"
+                        || t.first == "▁<MID>" // CodeLlama
                         ) {
                     special_fim_mid_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1850,6 +1899,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 if (false
                         || t.first == "<|fim_pad|>" // Qwen
                         || t.first == "<fim-pad>"
+                        || t.first == "<fim_pad>" // Granite
                         || t.first == "<PAD>"
                         ) {
                     special_fim_pad_id = t.second;
@@ -1868,6 +1918,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == "<|repo_name|>"
                         || t.first == "<fim-repo>"
                         || t.first == "<REPO>"
+                        || t.first == "<reponame>" // Granite
                         ) {
                     special_fim_rep_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1919,6 +1970,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                     || t.first == "<|endoftext|>"
                     || t.first == "<|eom_id|>"
                     || t.first == "<EOT>"
+                    || t.first == "_<EOT>"
                     ) {
                 special_eog_ids.insert(t.second);
                 if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -2177,14 +2229,12 @@ void llama_vocab::impl::tokenizer_st_partition(std::forward_list<fragment_buffer
                 // find the first occurrence of a given special token in this fragment
                 // passing offset argument only limit the "search area" but match coordinates
                 // are still relative to the source full raw_text
-                auto match = raw_text.find(text, raw_text_base_offset);
+                // string_view begins at pos 0 for the same reason
+                auto match = std::string_view(raw_text.data(), raw_text_base_offset + raw_text_base_length).find(text, raw_text_base_offset);
 
                 // no occurrences found, stop processing this fragment for a given special token
                 if (match == std::string::npos) break;
 
-                // check if match is within bounds of offset <-> length
-                if (match + text.length() > raw_text_base_offset + raw_text_base_length) break;
-
 #ifdef PRETOKENIZERDEBUG
                 LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
 #endif
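
The last hunk above replaces an unbounded raw_text.find() with a search over a std::string_view that ends at raw_text_base_offset + raw_text_base_length, so a special token can no longer match past the end of the current fragment and the separate bounds check becomes unnecessary. A small self-contained illustration of the idea, independent of the llama.cpp types (the example strings are arbitrary):

// Sketch: bound a substring search to a fragment [offset, offset + length) of a larger buffer.
// Because the view ends at the fragment end, a match that would run past the fragment is
// reported as npos instead of requiring an extra bounds check afterwards.
#include <cassert>
#include <string>
#include <string_view>

int main() {
    const std::string raw_text = "abc<EOT>def";
    const size_t offset = 0;
    const size_t length = 5; // the fragment is only "abc<E"

    std::string_view view(raw_text.data(), offset + length);
    assert(view.find("<EOT>", offset) == std::string_view::npos); // token does not fit in the fragment
    assert(raw_text.find("<EOT>", offset) == 3);                  // the unbounded search would have matched
    return 0;
}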
 (File diff suppressed because it is too large.)
@@ -60,6 +60,7 @@ extern "C" {
     struct llama_model;
     struct llama_context;
     struct llama_sampler;
+    struct llama_kv_cache;
 
     typedef int32_t llama_pos;
     typedef int32_t llama_token;
@@ -105,6 +106,12 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26,
         LLAMA_VOCAB_PRE_TYPE_MINERVA = 27,
         LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
+        LLAMA_VOCAB_PRE_TYPE_GPT4O = 29,
+        LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30,
+        LLAMA_VOCAB_PRE_TYPE_TRILLION = 31,
+        LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32,
+        LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33,
+        LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34,
     };
 
     enum llama_rope_type {
@@ -213,7 +220,7 @@ extern "C" {
         LLAMA_SPLIT_MODE_ROW = 2, // split layers and KV across GPUs, use tensor parallelism if supported
     };
 
-    // TODO: simplify (https://github.com/ggerganov/llama.cpp/pull/9294#pullrequestreview-2286561979)
+    // TODO: simplify (https://github.com/ggml-org/llama.cpp/pull/9294#pullrequestreview-2286561979)
     typedef struct llama_token_data {
         llama_token id; // token id
         float logit;    // log-odds of the token
@@ -275,10 +282,18 @@ extern "C" {
         };
     };
 
+    struct llama_model_tensor_buft_override {
+        const char * pattern;
+        ggml_backend_buffer_type_t buft;
+    };
+
     struct llama_model_params {
         // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used)
         ggml_backend_dev_t * devices;
 
+        // NULL-terminated list of buffer types to use for tensors that match a pattern
+        const struct llama_model_tensor_buft_override * tensor_buft_overrides;
+
         int32_t n_gpu_layers; // number of layers to store in VRAM
         enum llama_split_mode split_mode; // how to split the model across multiple GPUs
 
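
The new llama_model_tensor_buft_override entry lets a caller pin tensors whose names match a regex pattern to a specific backend buffer type at load time. A hedged sketch of how the field might be filled in, assuming the list is NULL-terminated as the field comment states; the pattern, the layer count, and the caller-supplied host buffer type are illustrative only:

// Sketch: keep tensors matching a pattern (e.g. MoE expert tensors) in a host buffer type
// supplied by the caller, while offloading everything else. Values are example choices.
#include "llama.h"

llama_model * load_with_overrides(const char * path, ggml_backend_buffer_type_t host_buft) {
    const llama_model_tensor_buft_override overrides[] = {
        { "\\.ffn_.*_exps\\.", host_buft }, // hypothetical pattern for expert tensors
        { nullptr,             nullptr   }, // terminator (assumed NULL-terminated per the comment above)
    };

    llama_model_params mparams = llama_model_default_params();
    mparams.tensor_buft_overrides = overrides;
    mparams.n_gpu_layers          = 99;

    return llama_model_load_from_file(path, mparams);
}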
@@ -307,7 +322,7 @@ extern "C" {
     };
 
     // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
-    // https://github.com/ggerganov/llama.cpp/pull/7544
+    // https://github.com/ggml-org/llama.cpp/pull/7544
     struct llama_context_params {
         uint32_t n_ctx;   // text context, 0 = from model
         uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
@@ -320,7 +335,7 @@ extern "C" {
         enum llama_pooling_type pooling_type;     // whether to pool (sum) embedding results by sequence id
         enum llama_attention_type attention_type; // attention type to use for embeddings
 
-        // ref: https://github.com/ggerganov/llama.cpp/pull/2054
+        // ref: https://github.com/ggml-org/llama.cpp/pull/2054
         float rope_freq_base;   // RoPE base frequency, 0 = from model
         float rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
         float yarn_ext_factor;  // YaRN extrapolation mix factor, negative = from model
@@ -364,6 +379,7 @@ extern "C" {
         bool keep_split;     // quantize to the same number of shards
         void * imatrix;      // pointer to importance matrix data
         void * kv_overrides; // pointer to vector containing overrides
+        void * tensor_types; // pointer to vector containing tensor types
     } llama_model_quantize_params;
 
     typedef struct llama_logit_bias {
@@ -385,7 +401,7 @@ extern "C" {
     struct llama_adapter_lora;
 
     // Helpers for getting default parameters
-    // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172)
+    // TODO: update API to start accepting pointers to params structs (https://github.com/ggml-org/llama.cpp/discussions/9172)
     LLAMA_API struct llama_model_params llama_model_default_params(void);
     LLAMA_API struct llama_context_params llama_context_default_params(void);
     LLAMA_API struct llama_sampler_chain_params llama_sampler_chain_default_params(void);
@@ -468,7 +484,8 @@ extern "C" {
     DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead");
 
     LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx);
-    LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
+    LLAMA_API struct llama_kv_cache * llama_get_kv_self ( struct llama_context * ctx);
+    LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); // TODO: rename to llama_get_pooling_type
 
     LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model);
     LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model);
@@ -477,6 +494,7 @@ extern "C" {
     LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model);
     LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model);
     LLAMA_API int32_t llama_model_n_head (const struct llama_model * model);
+    LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
 
     // Get the model's RoPE frequency scaling factor
     LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model);
@@ -584,7 +602,7 @@ extern "C" {
     // KV cache
     //
 
-    // TODO: remove llama_kv_cache_view_* API
+    // TODO: start using struct llama_kv_cache
 
     // Information associated with an individual cell in the KV cache view.
     struct llama_kv_cache_view_cell {
@@ -639,13 +657,19 @@ extern "C" {
 
     // Returns the number of tokens in the KV cache (slow, use only for debug)
     // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
-    LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);
+    LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx);
+
+    DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx),
+        "use llama_kv_self_n_tokens instead");
 
     // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
-    LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);
+    LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx);
+
+    DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx),
+        "use llama_kv_self_used_cells instead");
 
     // Clear the KV cache - both cell info is erased and KV data is zeroed
-    LLAMA_API void llama_kv_cache_clear(
+    LLAMA_API void llama_kv_self_clear(
             struct llama_context * ctx);
 
     // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
@@ -653,7 +677,7 @@ extern "C" {
     // seq_id < 0 : match any sequence
     // p0 < 0 : [0, p1]
    // p1 < 0 : [p0, inf)
-    LLAMA_API bool llama_kv_cache_seq_rm(
+    LLAMA_API bool llama_kv_self_seq_rm(
             struct llama_context * ctx,
             llama_seq_id seq_id,
            llama_pos p0,
@@ -663,7 +687,7 @@ extern "C" {
     // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
     // p0 < 0 : [0, p1]
     // p1 < 0 : [p0, inf)
-    LLAMA_API void llama_kv_cache_seq_cp(
+    LLAMA_API void llama_kv_self_seq_cp(
             struct llama_context * ctx,
             llama_seq_id seq_id_src,
             llama_seq_id seq_id_dst,
@@ -671,17 +695,17 @@ extern "C" {
             llama_pos p1);
 
     // Removes all tokens that do not belong to the specified sequence
-    LLAMA_API void llama_kv_cache_seq_keep(
+    LLAMA_API void llama_kv_self_seq_keep(
             struct llama_context * ctx,
             llama_seq_id seq_id);
 
     // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
     // If the KV cache is RoPEd, the KV data is updated accordingly:
     //   - lazily on next llama_decode()
-    //   - explicitly with llama_kv_cache_update()
+    //   - explicitly with llama_kv_self_update()
     // p0 < 0 : [0, p1]
     // p1 < 0 : [p0, inf)
-    LLAMA_API void llama_kv_cache_seq_add(
+    LLAMA_API void llama_kv_self_seq_add(
             struct llama_context * ctx,
             llama_seq_id seq_id,
             llama_pos p0,
@@ -691,10 +715,10 @@ extern "C" {
     // Integer division of the positions by factor of `d > 1`
     // If the KV cache is RoPEd, the KV data is updated accordingly:
     //   - lazily on next llama_decode()
-    //   - explicitly with llama_kv_cache_update()
+    //   - explicitly with llama_kv_self_update()
     // p0 < 0 : [0, p1]
     // p1 < 0 : [p0, inf)
-    LLAMA_API void llama_kv_cache_seq_div(
+    LLAMA_API void llama_kv_self_seq_div(
            struct llama_context * ctx,
            llama_seq_id seq_id,
            llama_pos p0,
@@ -702,24 +726,76 @@ extern "C" {
            int d);
 
     // Returns the largest position present in the KV cache for the specified sequence
-    LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
+    LLAMA_API llama_pos llama_kv_self_seq_pos_max(
            struct llama_context * ctx,
            llama_seq_id seq_id);
 
-    // TODO: the llama_kv_cache_defrag and llama_kv_cache_update API tightly couples llama_context with llama_kv_cache
-    // how to avoid this?
-
     // Defragment the KV cache
     // This will be applied:
     //   - lazily on next llama_decode()
-    //   - explicitly with llama_kv_cache_update()
-    LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx);
+    //   - explicitly with llama_kv_self_update()
+    LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx);
 
-    // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
-    LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
-
     // Check if the context supports KV cache shifting
-    LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx);
+    LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx);
+
+    // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
+    LLAMA_API void llama_kv_self_update(struct llama_context * ctx);
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_clear(
+            struct llama_context * ctx),
+        "use llama_kv_self_clear instead");
+
+    DEPRECATED(LLAMA_API bool llama_kv_cache_seq_rm(
+            struct llama_context * ctx,
+            llama_seq_id seq_id,
+            llama_pos p0,
+            llama_pos p1),
+        "use llama_kv_self_seq_rm instead");
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_seq_cp(
+            struct llama_context * ctx,
+            llama_seq_id seq_id_src,
+            llama_seq_id seq_id_dst,
+            llama_pos p0,
+            llama_pos p1),
+        "use llama_kv_self_seq_cp instead");
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_seq_keep(
+            struct llama_context * ctx,
+            llama_seq_id seq_id),
+        "use llama_kv_self_seq_keep instead");
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_seq_add(
+            struct llama_context * ctx,
+            llama_seq_id seq_id,
+            llama_pos p0,
+            llama_pos p1,
+            llama_pos delta),
+        "use llama_kv_self_seq_add instead");
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_seq_div(
+            struct llama_context * ctx,
+            llama_seq_id seq_id,
+            llama_pos p0,
+            llama_pos p1,
+            int d),
+        "use llama_kv_self_seq_div instead");
+
+    DEPRECATED(LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
+            struct llama_context * ctx,
+            llama_seq_id seq_id),
+        "use llama_kv_self_seq_pos_max instead");
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx),
+        "use llama_kv_self_defrag instead");
+
+    DEPRECATED(LLAMA_API bool llama_kv_cache_can_shift(const struct llama_context * ctx),
+        "use llama_kv_self_can_shift instead");
+
+    DEPRECATED(LLAMA_API void llama_kv_cache_update(struct llama_context * ctx),
+        "use llama_kv_self_update instead");
+
 
     //
     // State / sessions
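
The block above renames the per-context KV cache helpers from llama_kv_cache_* to llama_kv_self_* and keeps the old names as DEPRECATED wrappers, so existing callers keep compiling while they migrate. A hedged before/after sketch of a typical context shift, using only functions declared in this header; the arithmetic for n_discard is an example policy, not part of the API:

// Sketch: drop the oldest half of a sequence past n_keep and shift the remainder down,
// written against the renamed llama_kv_self_* calls. Error handling is omitted.
#include "llama.h"

void shift_context(llama_context * ctx, llama_seq_id seq, llama_pos n_keep, llama_pos n_past) {
    const llama_pos n_discard = (n_past - n_keep) / 2;

    // before this change these were llama_kv_cache_seq_rm / llama_kv_cache_seq_add
    llama_kv_self_seq_rm (ctx, seq, n_keep, n_keep + n_discard);
    llama_kv_self_seq_add(ctx, seq, n_keep + n_discard, n_past, -n_discard);

    // the K-shift (and any pending defragmentation) is applied lazily on the next
    // llama_decode(), or explicitly:
    llama_kv_self_update(ctx);
}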
@@ -883,6 +959,10 @@ extern "C" {
     // If set to true, the model will only attend to the past tokens
     LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
 
+    // Set whether the model is in warmup mode or not
+    // If true, all model tensors are activated during llama_decode() to load and cache their weights.
+    LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup);
+
     // Set abort callback
     LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data);
 
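
llama_set_warmup() is new in this hunk; per its comment, the next llama_decode() then touches every model tensor so the weights get loaded and cached before real work starts. A small hedged sketch of a warmup pass over the BOS token (a common pattern in llama.cpp frontends; the exact choice of warmup tokens is illustrative):

// Sketch: run a single warmup decode so weights are faulted in before timing begins.
#include "llama.h"

void warmup(llama_context * ctx) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    llama_token bos = llama_vocab_bos(vocab);

    llama_set_warmup(ctx, true);
    llama_decode(ctx, llama_batch_get_one(&bos, 1)); // activates all tensors per the comment above
    llama_set_warmup(ctx, false);

    llama_kv_self_clear(ctx); // discard the warmup token from the cache
}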
@@ -1040,7 +1120,7 @@ extern "C" {
 
     /// Apply chat template. Inspired by hf apply_chat_template() on python.
     /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
-    /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
+    /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggml-org/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
     /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.
     /// @param chat Pointer to a list of multiple llama_chat_message
     /// @param n_msg Number of llama_chat_message in this chat
@@ -1114,11 +1194,12 @@ extern "C" {
     };
 
     struct llama_sampler {
-        struct llama_sampler_i * iface;
+        const struct llama_sampler_i * iface;
         llama_sampler_context_t ctx;
     };
 
     // mirror of llama_sampler_i:
+    LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx);
     LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl);
     LLAMA_API void llama_sampler_accept( struct llama_sampler * smpl, llama_token token);
     LLAMA_API void llama_sampler_apply ( struct llama_sampler * smpl, llama_token_data_array * cur_p);
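
Exporting llama_sampler_init() (and const-qualifying iface) means code outside the library can now define custom samplers the same way the built-in ones were converted above. A minimal hedged sketch of a user-defined sampler that bans one token id, assuming the llama_sampler_i callback set (name/accept/apply/reset/clone/free) declared earlier in this header; optional callbacks are left NULL as a simplification:

// Sketch: a custom sampler built with the newly exported llama_sampler_init().
// It forces the logit of one token to -INFINITY; names and values are illustrative.
#include "llama.h"
#include <cmath>

struct ban_token_ctx {
    llama_token banned;
};

static const char * ban_token_name(const llama_sampler * /*smpl*/) {
    return "ban-token";
}

static void ban_token_apply(llama_sampler * smpl, llama_token_data_array * cur_p) {
    const auto * c = (const ban_token_ctx *) smpl->ctx;
    for (size_t i = 0; i < cur_p->size; ++i) {
        if (cur_p->data[i].id == c->banned) {
            cur_p->data[i].logit = -INFINITY; // never sample this token
        }
    }
}

static void ban_token_free(llama_sampler * smpl) {
    delete (ban_token_ctx *) smpl->ctx;
}

static const llama_sampler_i ban_token_iface = {
    /* .name   = */ ban_token_name,
    /* .accept = */ nullptr,
    /* .apply  = */ ban_token_apply,
    /* .reset  = */ nullptr,
    /* .clone  = */ nullptr,
    /* .free   = */ ban_token_free,
};

llama_sampler * llama_sampler_init_ban_token(llama_token banned) {
    return llama_sampler_init(&ban_token_iface, new ban_token_ctx { banned });
}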
@@ -1148,7 +1229,7 @@ extern "C" {
     /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
     /// NOTE: Avoid using on the full vocabulary as the sorting can become slow. For example, apply top-k or top-p sampling first.
     DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_softmax (void),
-        "will be removed in the future (see https://github.com/ggerganov/llama.cpp/pull/9896#discussion_r1800920915)");
+        "will be removed in the future (see https://github.com/ggml-org/llama.cpp/pull/9896#discussion_r1800920915)");
 
     /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
     LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k);
@@ -1156,7 +1237,7 @@ extern "C" {
     /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
     LLAMA_API struct llama_sampler * llama_sampler_init_top_p (float p, size_t min_keep);
 
-    /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
+    /// @details Minimum P sampling as described in https://github.com/ggml-org/llama.cpp/pull/3841
     LLAMA_API struct llama_sampler * llama_sampler_init_min_p (float p, size_t min_keep);
 
     /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
@@ -1171,6 +1252,9 @@ extern "C" {
     /// @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335
     LLAMA_API struct llama_sampler * llama_sampler_init_xtc (float p, float t, size_t min_keep, uint32_t seed);
 
+    /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641
+    LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n);
+
     /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
     /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
     /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
@@ -1194,23 +1278,39 @@ extern "C" {
         float tau,
         float eta);
 
+    /// @details Intializes a GBNF grammar, see grammars/README.md for details.
+    /// @param vocab The vocabulary that this grammar will be used with.
+    /// @param grammar_str The production rules for the grammar, encoded as a string. Returns an empty grammar if empty. Returns NULL if parsing of grammar_str fails.
+    /// @param grammar_root The name of the start symbol for the grammar.
     LLAMA_API struct llama_sampler * llama_sampler_init_grammar(
         const struct llama_vocab * vocab,
         const char * grammar_str,
         const char * grammar_root);
 
-    /// @details Lazy grammar sampler, introduced in https://github.com/ggerganov/llama.cpp/pull/9639
-    /// @param trigger_words A list of words that will trigger the grammar sampler. This may be updated to a loose regex syntax (w/ ^) in a near future.
-    /// @param trigger_tokens A list of tokens that will trigger the grammar sampler.
-    LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy(
+    DEPRECATED(LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy(
         const struct llama_vocab * vocab,
         const char * grammar_str,
         const char * grammar_root,
         const char ** trigger_words,
         size_t num_trigger_words,
+        const llama_token * trigger_tokens,
+        size_t num_trigger_tokens),
+        "use llama_sampler_init_grammar_lazy_patterns instead");
+
+
+    /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639
+    /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. Pattern will be matched from the start of the generation output, and grammar sampler will be fed content starting from its first match group.
+    /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. Grammar sampler will be fed content starting from the trigger token included.
+    LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root,
+        const char ** trigger_patterns,
+        size_t num_trigger_patterns,
         const llama_token * trigger_tokens,
         size_t num_trigger_tokens);
 
+
     /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first.
     LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
         int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size)
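
llama_sampler_init_grammar_lazy() is deprecated above in favour of llama_sampler_init_grammar_lazy_patterns(), which takes regex-style trigger patterns instead of literal trigger words and feeds the grammar from the first match group. A hedged usage sketch; the GBNF grammar string and the trigger pattern below are example values only, roughly in the style used for constrained tool-call output:

// Sketch: a lazy grammar sampler that only engages once the output contains a '{'.
// Grammar and pattern are illustrative; see grammars/README.md for real GBNF.
#include "llama.h"

llama_sampler * make_lazy_json_sampler(const llama_vocab * vocab) {
    static const char * grammar = "root ::= \"{\" [^}]* \"}\"";

    // the grammar is fed content starting from the pattern's first match group
    static const char * trigger_patterns[] = {
        "[\\s\\S]*?(\\{[\\s\\S]*)", // everything from the first '{' onwards
    };

    return llama_sampler_init_grammar_lazy_patterns(
        vocab,
        grammar, "root",
        trigger_patterns, 1,
        /* trigger_tokens     = */ nullptr,
        /* num_trigger_tokens = */ 0);
}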
@@ -618,8 +618,15 @@ std::vector<uint32_t> unicode_cpts_from_utf8(const std::string & utf8) {
     result.reserve(utf8.size());
     size_t offset = 0;
     while (offset < utf8.size()) {
+        try {
         result.push_back(unicode_cpt_from_utf8(utf8, offset));
         }
+        catch (const std::invalid_argument & /*ex*/) {
+            // Silently ignore invalid UTF-8 input to avoid leaking the exception beyond llama_tokenize
+            ++offset;
+            result.emplace_back(0xFFFD); // replacement character
+        }
+    }
     return result;
 }
 
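
The hunk above makes unicode_cpts_from_utf8() tolerant of malformed input: instead of letting std::invalid_argument escape through llama_tokenize, it skips one byte and emits U+FFFD. The same recovery pattern in isolation, with a stand-in decoder (the real unicode_cpt_from_utf8 advances offset by the length of the sequence it consumed):

// Sketch of the recovery pattern: on an invalid byte, emit U+FFFD and advance by one,
// so a single bad byte cannot abort processing of the whole string.
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// stand-in for unicode_cpt_from_utf8(): accepts ASCII only, throws on anything else
static uint32_t decode_one(const std::string & s, size_t & offset) {
    const unsigned char c = s[offset];
    if (c < 0x80) {
        ++offset;
        return c;
    }
    throw std::invalid_argument("invalid or unsupported UTF-8 byte");
}

std::vector<uint32_t> cpts_with_replacement(const std::string & s) {
    std::vector<uint32_t> result;
    result.reserve(s.size());
    size_t offset = 0;
    while (offset < s.size()) {
        try {
            result.push_back(decode_one(s, offset));
        } catch (const std::invalid_argument &) {
            ++offset;                 // skip the offending byte
            result.push_back(0xFFFD); // U+FFFD REPLACEMENT CHARACTER
        }
    }
    return result;
}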
@@ -701,7 +708,7 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std
     const auto cpts = unicode_cpts_from_utf8(text);
 
     // generate a "collapsed" representation of the text, where all codepoints are replaced by a single byte
-    // ref: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2081479935
+    // ref: https://github.com/ggml-org/llama.cpp/pull/6920#issuecomment-2081479935
     std::string text_collapsed;
     if (need_collapse) {
         // collapse all unicode categories