talk-llama : sync llama.cpp

ggml-ci
Author: Georgi Gerganov
Date:   2025-06-20 21:18:44 +03:00
Commit: e6c10cf3d5 (parent d65a579a0a)
28 changed files with 2521 additions and 1738 deletions

View File

@ -18,7 +18,8 @@ if (WHISPER_SDL2)
llama-io.cpp llama-io.cpp
llama-kv-cache-unified.cpp llama-kv-cache-unified.cpp
llama-kv-cache-unified-iswa.cpp llama-kv-cache-unified-iswa.cpp
llama-kv-cache-recurrent.cpp llama-memory-recurrent.cpp
llama-memory-hybrid.cpp
llama-memory.cpp llama-memory.cpp
llama-mmap.cpp llama-mmap.cpp
llama-model-loader.cpp llama-model-loader.cpp

View File

@ -147,6 +147,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
{ LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" },
{ LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" }, { LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" },
{ LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" }, { LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" },
{ LLM_KV_ATTENTION_LAYER_INDICES, "%s.attention.layer_indices" },
{ LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
{ LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" }, { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" },
@ -197,6 +198,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
{ LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" }, { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" },
{ LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" }, { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
{ LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" }, { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
{ LLM_KV_TOKENIZER_ADD_SEP, "tokenizer.ggml.add_sep_token" },
{ LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" }, { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" },
{ LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" }, { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" },
{ LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" }, { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" },
@ -1816,3 +1818,25 @@ llm_arch llm_arch_from_string(const std::string & name) {
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) { const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
return LLM_TENSOR_INFOS.at(tensor); return LLM_TENSOR_INFOS.at(tensor);
} }
bool llm_arch_is_recurrent(const llm_arch & arch) {
switch (arch) {
case LLM_ARCH_MAMBA:
case LLM_ARCH_RWKV6:
case LLM_ARCH_RWKV6QWEN2:
case LLM_ARCH_RWKV7:
case LLM_ARCH_ARWKV7:
return true;
default:
return false;
}
}
bool llm_arch_is_hybrid(const llm_arch & arch) {
// TODO: There are currently no hybrid models! Once there are, this will be
// the place to identify them
switch (arch) {
default:
return false;
}
}
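
These two predicates give loading code a single switch point for choosing a memory implementation per architecture. A minimal sketch of how a caller might dispatch on them (hypothetical helper, not the actual llama.cpp selection logic; the memory kinds named here correspond to the source files touched by this commit):

    // hypothetical helper: map an architecture to the kind of memory module it needs
    enum class memory_kind { kv_unified, recurrent, hybrid };

    static memory_kind memory_kind_for_arch(llm_arch arch) {
        if (llm_arch_is_recurrent(arch)) {
            return memory_kind::recurrent;  // e.g. LLM_ARCH_MAMBA and the RWKV variants
        }
        if (llm_arch_is_hybrid(arch)) {
            return memory_kind::hybrid;     // currently unreachable, see the TODO above
        }
        return memory_kind::kv_unified;     // ordinary attention KV cache
    }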

View File

@ -151,6 +151,7 @@ enum llm_kv {
LLM_KV_ATTENTION_SCALE, LLM_KV_ATTENTION_SCALE,
LLM_KV_ATTENTION_KEY_LENGTH_MLA, LLM_KV_ATTENTION_KEY_LENGTH_MLA,
LLM_KV_ATTENTION_VALUE_LENGTH_MLA, LLM_KV_ATTENTION_VALUE_LENGTH_MLA,
LLM_KV_ATTENTION_LAYER_INDICES,
LLM_KV_ROPE_DIMENSION_COUNT, LLM_KV_ROPE_DIMENSION_COUNT,
LLM_KV_ROPE_DIMENSION_SECTIONS, LLM_KV_ROPE_DIMENSION_SECTIONS,
@ -193,6 +194,7 @@ enum llm_kv {
LLM_KV_TOKENIZER_MASK_ID, LLM_KV_TOKENIZER_MASK_ID,
LLM_KV_TOKENIZER_ADD_BOS, LLM_KV_TOKENIZER_ADD_BOS,
LLM_KV_TOKENIZER_ADD_EOS, LLM_KV_TOKENIZER_ADD_EOS,
LLM_KV_TOKENIZER_ADD_SEP,
LLM_KV_TOKENIZER_ADD_PREFIX, LLM_KV_TOKENIZER_ADD_PREFIX,
LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
@ -439,3 +441,6 @@ const char * llm_arch_name(llm_arch arch);
llm_arch llm_arch_from_string(const std::string & name); llm_arch llm_arch_from_string(const std::string & name);
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor); const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
bool llm_arch_is_recurrent(const llm_arch & arch);
bool llm_arch_is_hybrid (const llm_arch & arch);

View File

@ -1,7 +1,6 @@
#include "llama-batch.h" #include "llama-batch.h"
#include "llama-impl.h" #include "llama-impl.h"
#include "llama-cparams.h"
#include "llama-vocab.h" #include "llama-vocab.h"
#include "llama-memory.h" #include "llama-memory.h"
@ -10,282 +9,7 @@
#include <algorithm> #include <algorithm>
#include <sstream> #include <sstream>
llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) { llama_batch_allocr::llama_batch_allocr(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {
// clear empty sequences
// the previous ubatch is assumed to be gone,
// so nothing should refer to values in these sequences anymore.
for (size_t i = seq.size(); i-- > 0;) {
if (seq[i].length == 0) {
seq.pop_back();
} else {
break;
}
}
udatas.push_back({});
auto & udata = udatas.back();
udata.token.resize(!has_embd ? n_ubatch : 0);
udata.embd.resize(has_embd ? n_embd * n_ubatch : 0);
udata.pos.resize(n_ubatch);
udata.n_seq_id.resize(n_ubatch);
udata.seq_id.resize(n_ubatch);
udata.output.resize(n_ubatch);
llama_ubatch ubatch = {
/*equal_seqs =*/ true,
/*n_tokens =*/ 0,
/*n_seq_tokens =*/ 0,
/*n_seqs =*/ 0,
/*token =*/ !has_embd ? udata.token.data() : nullptr,
/*embd =*/ has_embd ? udata.embd.data() : nullptr,
/*pos =*/ udata.pos.data(),
/*n_seq_id =*/ udata.n_seq_id.data(),
/*seq_id =*/ udata.seq_id.data(),
/*output =*/ udata.output.data(),
};
return ubatch;
}
void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
GGML_ASSERT(batch != nullptr);
GGML_ASSERT(length <= seq.length);
// Can only add sequences of equal lengths to a batch,
// otherwise it isn't clear to which sequence a token belongs
GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
// NOTE: loops are separated for cache-friendliness
if (batch->token) {
if (ubatch.equal_seqs) {
for (size_t i = 0; i < length; ++i) {
ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
}
} else {
// simple split
ubatch.token = batch->token + seq.offset;
}
} else {
ubatch.token = nullptr;
}
if (batch->embd) {
if (ubatch.equal_seqs) {
for (size_t i = 0; i < length; ++i) {
memcpy(
ubatch.embd + (n_embd * (ubatch.n_tokens + i)),
batch->embd + (n_embd * ids[seq.offset + i]),
n_embd * sizeof(float)
);
}
} else {
// simple split
ubatch.embd = batch->embd + (n_embd * seq.offset);
}
} else {
ubatch.embd = nullptr;
}
if (ubatch.equal_seqs) {
for (size_t i = 0; i < length; ++i) {
ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
}
} else {
// simple split
ubatch.pos = batch->pos + seq.offset;
}
if (ubatch.equal_seqs) {
ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
if (seq.seq_id) {
ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
}
} else {
// simple split
if (batch->n_seq_id) {
ubatch.n_seq_id = batch->n_seq_id + seq.offset;
} else {
for (size_t i = 0; i < length; ++i) {
ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
}
}
if (batch->seq_id) {
ubatch.seq_id = batch->seq_id + seq.offset;
}
}
if (batch->logits) {
if (ubatch.equal_seqs) {
for (size_t i = 0; i < length; ++i) {
size_t id = ids[seq.offset + i];
int8_t is_output = batch->logits[id];
ubatch.output[ubatch.n_tokens + i] = is_output;
if (is_output) { out_ids.push_back(id); }
}
} else {
// simple split
ubatch.output = batch->logits + seq.offset;
for (size_t i = 0; i < length; ++i) {
if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
}
}
} else {
// only get last output
for (size_t i = 0; i < length; ++i) {
size_t id = ids[seq.offset + i];
int8_t is_last = id == ids.size() - 1;
ubatch.output[ubatch.n_tokens + i] = is_last;
if (is_last) { out_ids.push_back(id); }
}
}
if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
}
ubatch.n_tokens += length;
ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
seq.offset += length;
seq.length -= length;
n_tokens -= length;
GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
}
llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) {
n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
ubatch.equal_seqs = false;
if (!seq.empty()) {
llama_sbatch_seq & s = seq[0];
size_t length = s.length < n_ubatch ? s.length : n_ubatch;
GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
add_seq_to_ubatch(ubatch, s, length);
}
return ubatch;
}
llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) {
n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
if (!seq.empty()) {
size_t length = 0;
size_t n_tokens_in_ubatch = 0;
GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
// smallest first, because it's easier to split this way;
// starting from the end to pop in constant time.
for (size_t i = seq.size(); i-- > 0;) {
llama_sbatch_seq & s = seq[i];
GGML_ASSERT(s.length > 0);
if (length == 0) {
length = s.length < n_ubatch ? s.length : n_ubatch;
}
add_seq_to_ubatch(ubatch, s, length);
n_tokens_in_ubatch += length;
// shared prompts can't be mixed with any of their sequences,
// so it's safer to compute them in their own ubatch
if (s.n_seq_id > 1) { break; }
// stop when there isn't enough space for another sequence
if (length + n_tokens_in_ubatch > n_ubatch) { break; }
}
}
return ubatch;
}
llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) {
n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
if (!seq.empty()) {
llama_sbatch_seq & s = seq[seq.size() - 1];
size_t length = s.length < n_ubatch ? s.length : n_ubatch;
GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
add_seq_to_ubatch(ubatch, s, length);
}
return ubatch;
}
llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split) {
GGML_ASSERT(batch.n_tokens >= 0);
this->batch = &batch;
this->n_embd = n_embd;
n_tokens = batch.n_tokens;
ids.resize(n_tokens);
out_ids.clear();
// TODO: reserve out_ids and seq
for (size_t i = 0; i < n_tokens; ++i) {
ids[i] = i;
}
if (simple_split) {
seq.resize(1);
llama_sbatch_seq & s = seq[0];
s.n_seq_id = 0;
s.seq_id = nullptr;
s.offset = 0;
s.length = n_tokens;
return;
}
std::sort(ids.begin(), ids.end(),
[&batch](size_t a, size_t b) {
int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
// sort by seq_id, then by pos
if (n_seq_a == n_seq_b) {
if (batch.seq_id) {
for (int32_t i = 0; i < n_seq_a; ++i) {
llama_seq_id seq_id_a = batch.seq_id[a][i];
llama_seq_id seq_id_b = batch.seq_id[b][i];
// smaller seq_ids go first
if (seq_id_a != seq_id_b) {
return seq_id_a < seq_id_b;
}
}
}
// when all else is equal, sort by pos
if (batch.pos) {
return batch.pos[a] < batch.pos[b];
}
// no pos, sort by id
return a < b;
}
// shared prompts go first
return n_seq_a > n_seq_b;
}
);
// init seq
llama_sbatch_seq * last_seq = nullptr;
for (size_t i = 0; i < n_tokens; ++i) {
const size_t bi = ids[i];
const int32_t n_seqs = batch.n_seq_id[bi];
llama_seq_id * seq_ids = batch.seq_id[bi];
if (last_seq != nullptr) {
bool same = n_seqs == last_seq->n_seq_id;
for (int32_t j = 0; same && j < n_seqs; ++j) {
if (seq_ids[j] != last_seq->seq_id[j]) {
same = false;
}
}
if (same) {
last_seq->length += 1;
continue;
}
}
llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
seq.push_back(new_seq);
last_seq = &seq.back();
}
// keep shared prompts first at the end, then sort by length descending.
std::sort(seq.begin(), seq.end(),
[](llama_sbatch_seq & a, llama_sbatch_seq & b) {
if (a.n_seq_id == b.n_seq_id) {
return a.length > b.length;
}
return a.n_seq_id < b.n_seq_id;
}
);
}
llama_batch_allocr::llama_batch_allocr() {
const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG"); const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG");
debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0; debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0;
@ -294,17 +18,22 @@ llama_batch_allocr::llama_batch_allocr() {
for (auto & cur : seq_cpl) { for (auto & cur : seq_cpl) {
cur.resize(LLAMA_MAX_SEQ); cur.resize(LLAMA_MAX_SEQ);
} }
seq_idx.resize(LLAMA_MAX_SEQ, -1);
} }
bool llama_batch_allocr::init( bool llama_batch_allocr::init(
const llama_batch & batch_inp, const llama_batch & batch_inp,
const llama_vocab & vocab, const llama_vocab & vocab,
const llama_memory_i * memory, const llama_memory_i * memory,
bool embd_all) { uint32_t n_embd,
bool output_all) {
clear(); clear();
batch = batch_inp; batch = batch_inp;
this->vocab = &vocab;
GGML_ASSERT(batch.n_tokens > 0); GGML_ASSERT(batch.n_tokens > 0);
// //
@ -359,6 +88,7 @@ bool llama_batch_allocr::init(
llama_pos p0[LLAMA_MAX_SEQ]; llama_pos p0[LLAMA_MAX_SEQ];
for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
if (!memory) { if (!memory) {
// if no memory -> start from 0
p0[s] = 0; p0[s] = 0;
} else { } else {
p0[s] = memory->seq_pos_max(s) + 1; p0[s] = memory->seq_pos_max(s) + 1;
@ -370,8 +100,11 @@ bool llama_batch_allocr::init(
pos[i] = p0[seq_id]; pos[i] = p0[seq_id];
// update the starting position for all sequences that are assigned to this token
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
p0[batch.seq_id[i][s]] = pos[i] + 1; const llama_seq_id seq_id = batch.seq_id[i][s];
p0[seq_id] = pos[i] + 1;
} }
} }
@ -379,7 +112,7 @@ bool llama_batch_allocr::init(
} }
if (!batch.logits) { if (!batch.logits) {
if (embd_all) { if (output_all) {
// return the output for all tokens // return the output for all tokens
output.resize(batch.n_tokens, true); output.resize(batch.n_tokens, true);
} else { } else {
@ -389,7 +122,7 @@ bool llama_batch_allocr::init(
} }
batch.logits = output.data(); batch.logits = output.data();
} else if (embd_all) { } else if (output_all) {
bool warn = false; bool warn = false;
for (int32_t i = 0; i < batch.n_tokens; ++i) { for (int32_t i = 0; i < batch.n_tokens; ++i) {
@ -410,6 +143,9 @@ bool llama_batch_allocr::init(
// compute stats // compute stats
// //
this->n_embd = n_embd;
// count the outputs in this batch
for (int32_t i = 0; i < batch.n_tokens; ++i) { for (int32_t i = 0; i < batch.n_tokens; ++i) {
n_outputs += batch.logits[i] != 0; n_outputs += batch.logits[i] != 0;
} }
@ -417,66 +153,68 @@ bool llama_batch_allocr::init(
// determine coupled sequences // determine coupled sequences
// these are pairs of sequences that have at least one token in the input batch that is assigned to both of them // these are pairs of sequences that have at least one token in the input batch that is assigned to both of them
for (int32_t i = 0; i < batch.n_tokens; ++i) { for (int32_t i = 0; i < batch.n_tokens; ++i) {
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
seq_pos[batch.seq_id[i][s]].insert(batch.pos[i]);
if (s > 0) {
const llama_seq_id s0 = batch.seq_id[i][0]; const llama_seq_id s0 = batch.seq_id[i][0];
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
const llama_seq_id s1 = batch.seq_id[i][s]; const llama_seq_id s1 = batch.seq_id[i][s];
seq_pos[s1].insert(batch.pos[i]);
if (s > 0) {
// mark that sequence s1 is coupled to s0 // mark that sequence s1 is coupled to s0
seq_cpl[s1][s0] = true; seq_cpl[s1][s0] = true;
// note: the other way around is not necessary for now // note: tracking the other way around is not necessary for now
//seq_cpl[s0][s1] = true; //seq_cpl[s0][s1] = true;
} }
} }
} }
// precompute the sequence sets for each token and determine the unique sequence ids that participate in the batch
{
seq_set_t seq_set_unq;
for (int32_t i = 0; i < batch.n_tokens; ++i) {
seq_set_t cur;
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
const llama_seq_id seq_id = batch.seq_id[i][s];
cur .set(seq_id);
seq_set_unq.set(seq_id);
}
seq_set.push_back(cur);
seq_set_map[cur].push_back(i);
}
for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
if (seq_set_unq.test(s)) {
seq_idx[s] = seq_id_unq.size();
seq_id_unq.push_back(s);
}
}
}
if (debug > 0) { if (debug > 0) {
LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__); LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__);
LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, batch.n_tokens);
LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) batch.token);
LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) batch.embd);
LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) batch.pos);
LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) batch.n_seq_id);
LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) batch.seq_id);
LLAMA_LOG_DEBUG("%s: logits = %p\n", __func__, (void *) batch.logits);
LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
if (debug > 1) { llama_ubatch ubatch {
int seq_id_max = 0; /*.equal_seqs =*/ false,
for (int32_t i = 0; i < batch.n_tokens; ++i) { /*.n_tokens =*/ (uint32_t) batch.n_tokens,
for (int s = 0; s < batch.n_seq_id[i]; ++s) { /*.n_seq_tokens =*/ (uint32_t) 1,
for (int s = 0; s < batch.n_seq_id[i]; ++s) { /*.n_seqs =*/ (uint32_t) batch.n_tokens,
seq_id_max = std::max(seq_id_max, batch.seq_id[i][s]); /*.n_seqs_unq =*/ (uint32_t) this->seq_id_unq.size(),
} /*.token =*/ batch.token,
} /*.embd =*/ batch.embd,
} /*.pos =*/ batch.pos,
++seq_id_max; /*.n_seq_id =*/ batch.n_seq_id,
/*.seq_id =*/ batch.seq_id,
/*.seq_id_unq =*/ this->seq_id_unq.data(),
/*.seq_idx =*/ this->seq_idx.data(),
/*.output =*/ batch.logits,
};
LLAMA_LOG_DEBUG("%s: token = [\n", __func__); ubatch_print(ubatch, debug);
for (int32_t i = 0; i < batch.n_tokens; ++i) {
std::vector<int8_t> seq_id(seq_id_max);
for (int s = 0; s < batch.n_seq_id[i]; ++s) {
seq_id[batch.seq_id[i][s]] = 1;
}
std::stringstream ss;
for (int s = 0; s < seq_id_max; ++s) {
if (seq_id[s]) {
ss << s%10;
} else {
ss << ".";
}
}
LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
__func__, i, batch.token[i], vocab.token_to_piece(batch.token[i]).c_str(),
batch.pos[i], batch.n_seq_id[i], ss.str().c_str(), batch.logits[i]);
}
LLAMA_LOG_DEBUG("%s: ]\n", __func__);
LLAMA_LOG_DEBUG("%s: seq = [\n", __func__); LLAMA_LOG_DEBUG("%s: seq = [\n", __func__);
for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) { for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) {
@ -496,7 +234,6 @@ bool llama_batch_allocr::init(
} }
LLAMA_LOG_DEBUG("%s: ]\n", __func__); LLAMA_LOG_DEBUG("%s: ]\n", __func__);
} }
}
// //
// consistency checks // consistency checks
@ -507,10 +244,23 @@ bool llama_batch_allocr::init(
continue; continue;
} }
if (memory && seq_pos_min(s) != memory->seq_pos_max(s) + 1) { if (memory) {
if (batch.token) {
if (seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s); LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
return false; return false;
} }
} else {
assert(batch.embd);
// for embeddings (typically used as vision input), we allow them to have repeating positions
// ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
if (seq_pos_min(s) != memory->seq_pos_max(s) && seq_pos_min(s) != memory->seq_pos_max(s) + 1) {
LLAMA_LOG_ERROR("%s: sequence %d does not start from the last position stored in the memory\n", __func__, s);
return false;
}
}
}
if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) { if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) {
LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s); LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s);
@ -532,17 +282,120 @@ bool llama_batch_allocr::init(
} }
} }
// disallow partial sequence sub-sets:
//
// invalid: x
// i: 0 1 2 ...
// ---------------------------------------
// seq_id[i][0]: 0 0 1
// seq_id[i][1]: 1 1 2
// seq_id[i][2]: 2
//
// disallow decreasing sequence positions:
//
// invalid: x
// i: 0 1 2 3 4 5 6 ...
// ---------------------------------------
// pos[i]: 4 5 0 1 6 2 3
// seq_id[i][0]: 0 0 1 1 0 1 0
//
{
seq_set_t cur_seq_set[LLAMA_MAX_SEQ];
for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
cur_seq_set[s].set();
}
llama_pos cur_seq_pos[LLAMA_MAX_SEQ];
for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
cur_seq_pos[s] = -1;
}
for (int32_t i = 0; i < batch.n_tokens; ++i) {
const llama_pos pos = batch.pos[i];
for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
const llama_seq_id seq_id = batch.seq_id[i][s];
cur_seq_set[seq_id] &= seq_set[i];
if (cur_seq_set[seq_id].none()) {
LLAMA_LOG_ERROR("%s: sequence %d belongs to incompatible sequence sets (not allowed)\n", __func__, seq_id);
return false;
}
if (pos < cur_seq_pos[seq_id]) {
LLAMA_LOG_ERROR("%s: sequence %d positions are decreasing (not allowed)\n", __func__, seq_id);
return false;
}
}
}
}
split_reset();
return true; return true;
} }
llama_ubatch llama_batch_allocr::ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs) {
const uint32_t n_tokens = n_seq_tokens*n_seqs;
clear();
split_reset();
ubatches.emplace_back();
auto & ubatch = ubatches.back();
ubatch.token .resize(n_tokens);
ubatch.embd .clear();
ubatch.pos .resize(n_tokens);
ubatch.n_seq_id .resize(n_tokens);
ubatch.seq_id .resize(n_tokens);
ubatch.seq_id_unq.resize(0);
ubatch.seq_idx .resize(LLAMA_MAX_SEQ, -1);
ubatch.output .resize(n_tokens);
for (uint32_t s = 0; s < n_seqs; ++s) {
ubatch.seq_idx[s] = s;
ubatch.seq_id_unq.push_back(s);
}
llama_ubatch res {
/*.equal_seqs =*/ true,
/*.n_tokens =*/ n_tokens,
/*.n_seq_tokens =*/ n_seq_tokens,
/*.n_seqs =*/ n_seqs,
/*.n_seqs_unq =*/ n_seqs,
/*.token =*/ ubatch.token.data(),
/*.embd =*/ nullptr,
/*.pos =*/ ubatch.pos.data(),
/*.n_seq_id =*/ ubatch.n_seq_id.data(),
/*.seq_id =*/ ubatch.seq_id.data(),
/*.seq_id_unq =*/ ubatch.seq_id_unq.data(),
/*.seq_idx =*/ ubatch.seq_idx.data(),
/*.output =*/ ubatch.output.data(),
};
return res;
}
const llama_batch & llama_batch_allocr::get_batch() const { const llama_batch & llama_batch_allocr::get_batch() const {
return batch; return batch;
} }
uint32_t llama_batch_allocr::get_n_tokens() const {
return batch.n_tokens;
}
uint32_t llama_batch_allocr::get_n_outputs() const { uint32_t llama_batch_allocr::get_n_outputs() const {
return n_outputs; return n_outputs;
} }
std::vector<int32_t> & llama_batch_allocr::get_out_ids() {
return out_ids;
}
llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const { llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const {
return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin(); return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin();
} }
@ -551,13 +404,187 @@ llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const {
return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin(); return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin();
} }
void llama_batch_allocr::split_reset() {
out_ids.clear();
used.clear();
used.resize(get_n_tokens(), false);
ubatches.clear();
}
llama_ubatch llama_batch_allocr::split_simple(uint32_t n_ubatch) {
// find the first unused token
uint32_t cur_idx = 0;
while (cur_idx < used.size() && used[cur_idx]) {
++cur_idx;
}
// we are done
if (cur_idx >= used.size()) {
return {};
}
std::vector<int32_t> idxs;
while (true) {
idxs.push_back(cur_idx);
used[cur_idx] = true;
++cur_idx;
if (cur_idx >= used.size()) {
break;
}
if (idxs.size() >= n_ubatch) {
break;
}
}
return ubatch_add(idxs, idxs.size(), false);
}
llama_ubatch llama_batch_allocr::split_equal(uint32_t n_ubatch) {
std::vector<seq_set_t> cur_seq_set;
// determine the non-overlapping sequence sets participating in this ubatch
for (int32_t i = 0; i < batch.n_tokens; ++i) {
if (used[i]) {
continue;
}
bool add = true;
for (uint32_t s = 0; s < cur_seq_set.size(); ++s) {
// no overlap with existing sequence sets:
if (!(cur_seq_set[s] & seq_set[i]).none()) {
add = false;
break;
}
}
if (add) {
cur_seq_set.push_back(seq_set[i]);
if (cur_seq_set.size() > n_ubatch) {
break;
}
}
}
const uint32_t n_seqs = cur_seq_set.size();
// we are done
if (n_seqs == 0) {
return {};
}
// the current batch index of each sequence set
std::vector<int32_t> cur_idx(n_seqs, 0);
for (uint32_t s = 0; s < n_seqs; ++s) {
while (used[seq_set_map[cur_seq_set[s]][cur_idx[s]]]) {
++cur_idx[s];
}
}
// the list of batch indices for each sequence set
// at the end we will concat these to get the final ubatch
std::vector<idx_vec_t> idxs_per_seq(n_seqs);
while (true) {
// we can only add new n_seq_tokens tokens if all the sequence sets have at least one more unused token and
// if we haven't reached n_ubatch
bool can_expand = true;
for (uint32_t s = 0; s < n_seqs; ++s) {
if (cur_idx[s] >= (int32_t) seq_set_map[cur_seq_set[s]].size()) {
can_expand = false;
break;
}
}
if (!can_expand) {
break;
}
for (uint32_t s = 0; s < n_seqs; ++s) {
const int32_t idx = seq_set_map[cur_seq_set[s]][cur_idx[s]];
idxs_per_seq[s].push_back(idx);
used[idx] = true;
++cur_idx[s];
}
if ((idxs_per_seq[0].size() + 1)*n_seqs > n_ubatch) {
break;
}
}
// concat the per-sequence-set lists
std::vector<int32_t> idxs;
for (uint32_t s = 0; s < n_seqs; ++s) {
idxs.insert(idxs.end(), idxs_per_seq[s].begin(), idxs_per_seq[s].end());
}
return ubatch_add(idxs, n_seqs, true);
}
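
To make the equal-split invariant concrete, a small hypothetical example (token indices and sequence assignments invented for illustration): a 6-token batch where tokens 0-2 belong to sequence 0 and tokens 3-5 to sequence 1, split with n_ubatch = 4:

    // split_equal(4) -> first ubatch:  tokens {0, 1, 3, 4}  (n_seqs = 2, n_seq_tokens = 2, n_tokens = 4)
    // split_equal(4) -> second ubatch: tokens {2, 5}        (n_seqs = 2, n_seq_tokens = 1, n_tokens = 2)
    // every ubatch satisfies n_tokens == n_seq_tokens * n_seqs,
    // with the same number of tokens taken from each non-overlapping sequence set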
llama_ubatch llama_batch_allocr::split_seq(uint32_t n_ubatch) {
// find the first unused token
uint32_t cur_idx = 0;
while (cur_idx < used.size() && used[cur_idx]) {
++cur_idx;
}
// we are done
if (cur_idx >= used.size()) {
return {};
}
// this is the starting sequence set
// we allow adding tokens only if their sequence set is a subset of the current sequence set
auto cur_seq_set = seq_set[cur_idx];
std::vector<int32_t> idxs;
while (true) {
idxs.push_back(cur_idx);
used[cur_idx] = true;
if (idxs.size() >= n_ubatch) {
break;
}
do {
++cur_idx;
} while (cur_idx < get_n_tokens() && (used[cur_idx] || ((cur_seq_set & seq_set[cur_idx]) != seq_set[cur_idx])));
if (cur_idx == get_n_tokens()) {
break;
}
cur_seq_set = seq_set[cur_idx];
}
return ubatch_add(idxs, 1, true);
}
void llama_batch_allocr::clear() { void llama_batch_allocr::clear() {
n_outputs = 0; n_outputs = 0;
batch = {}; batch = {};
pos .clear(); pos .clear();
n_seq_id .clear(); n_seq_id .clear();
seq_id .clear(); seq_id .clear();
seq_id_unq.clear();
output .clear(); output .clear();
for (auto & cur : seq_pos) { for (auto & cur : seq_pos) {
@ -567,6 +594,177 @@ void llama_batch_allocr::clear() {
for (auto & cur : seq_cpl) { for (auto & cur : seq_cpl) {
std::fill(cur.begin(), cur.end(), false); std::fill(cur.begin(), cur.end(), false);
} }
seq_set.clear();
seq_set_map.clear();
std::fill(seq_idx.begin(), seq_idx.end(), -1);
}
llama_ubatch llama_batch_allocr::ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs) {
const uint32_t n_tokens = idxs.size();
assert(n_tokens%n_seqs == 0);
ubatches.emplace_back();
auto & ubatch = ubatches.back();
const int32_t n_pos_cur = batch.embd ? n_pos_per_embd : 1;
const int64_t n_embd_all = batch.embd ? (int64_t) n_tokens*n_embd : 0;
const int64_t n_pos_all = (int64_t) n_tokens*n_pos_cur;
ubatch.token .resize(n_tokens);
ubatch.embd .resize(n_embd_all);
ubatch.pos .resize(n_pos_all);
ubatch.n_seq_id .resize(n_tokens);
ubatch.seq_id .resize(n_tokens);
ubatch.seq_id_unq.resize(0);
ubatch.seq_idx .resize(LLAMA_MAX_SEQ, -1);
ubatch.output .resize(n_tokens);
seq_set_t seq_set_unq;
for (size_t i = 0; i < idxs.size(); ++i) {
if (batch.token) {
ubatch.token[i] = batch.token[idxs[i]];
}
if (batch.embd) {
memcpy(ubatch.embd.data() + i*n_embd, batch.embd + (int64_t) idxs[i]*n_embd, n_embd*sizeof(float));
}
for (int j = 0; j < n_pos_cur; ++j) {
ubatch.pos[j*n_tokens + i] = batch.pos[j*batch.n_tokens + idxs[i]];
}
ubatch.n_seq_id[i] = batch.n_seq_id[idxs[i]];
ubatch.seq_id[i] = batch.seq_id[idxs[i]];
ubatch.output[i] = batch.logits[idxs[i]];
for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
seq_set_unq.set(ubatch.seq_id[i][s]);
}
if (ubatch.output[i]) {
out_ids.push_back(idxs[i]);
}
}
for (int32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
if (seq_set_unq.test(s)) {
ubatch.seq_idx[s] = ubatch.seq_id_unq.size();
ubatch.seq_id_unq.push_back(s);
}
}
llama_ubatch res {
/*.equal_seqs =*/ equal_seqs,
/*.n_tokens =*/ n_tokens,
/*.n_seq_tokens =*/ n_tokens/n_seqs,
/*.n_seqs =*/ n_seqs,
/*.n_seqs_unq =*/ (uint32_t) ubatch.seq_id_unq.size(),
/*.token =*/ batch.token ? ubatch.token.data() : nullptr,
/*.embd =*/ batch.embd ? ubatch.embd.data() : nullptr,
/*.pos =*/ ubatch.pos.data(),
/*.n_seq_id =*/ ubatch.n_seq_id.data(),
/*.seq_id =*/ ubatch.seq_id.data(),
/*.seq_id_unq =*/ ubatch.seq_id_unq.data(),
/*.seq_idx =*/ ubatch.seq_idx.data(),
/*.output =*/ ubatch.output.data(),
};
if (debug > 0) {
LLAMA_LOG_DEBUG("%s: added ubatch %d to split:\n", __func__, (int) ubatches.size() - 1);
ubatch_print(res, debug);
}
return res;
}
void llama_batch_allocr::ubatch_print(const llama_ubatch & ubatch, int debug) {
if (debug > 0) {
LLAMA_LOG_DEBUG("%s: equal_seqs = %d\n", __func__, ubatch.equal_seqs);
LLAMA_LOG_DEBUG("%s: n_tokens = %d\n", __func__, ubatch.n_tokens);
LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d\n", __func__, ubatch.n_seq_tokens);
LLAMA_LOG_DEBUG("%s: n_seqs = %d\n", __func__, ubatch.n_seqs);
LLAMA_LOG_DEBUG("%s: n_seqs_unq = %d\n", __func__, ubatch.n_seqs_unq);
std::stringstream ss_seq_id_unq;
std::stringstream ss_seq_idx;
ss_seq_id_unq << "[ ";
ss_seq_idx << "[";
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
ss_seq_id_unq << ubatch.seq_id_unq[s] << " ";
}
for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
if (ubatch.seq_idx[s] >= 0) {
ss_seq_idx << ubatch.seq_idx[s]%10;
} else {
ss_seq_idx << ".";
}
}
ss_seq_id_unq << "]";
ss_seq_idx << "]";
LLAMA_LOG_DEBUG("%s: token = %p\n", __func__, (void *) ubatch.token);
LLAMA_LOG_DEBUG("%s: embd = %p\n", __func__, (void *) ubatch.embd);
LLAMA_LOG_DEBUG("%s: pos = %p\n", __func__, (void *) ubatch.pos);
LLAMA_LOG_DEBUG("%s: n_seq_id = %p\n", __func__, (void *) ubatch.n_seq_id);
LLAMA_LOG_DEBUG("%s: seq_id = %p\n", __func__, (void *) ubatch.seq_id);
LLAMA_LOG_DEBUG("%s: seq_id_unq = %s\n", __func__, ss_seq_id_unq.str().c_str());
LLAMA_LOG_DEBUG("%s: seq_idx = %s\n", __func__, ss_seq_idx.str().c_str());
LLAMA_LOG_DEBUG("%s: output = %p\n", __func__, (void *) ubatch.output);
LLAMA_LOG_DEBUG("%s: n_outputs = %d\n", __func__, n_outputs);
if (debug > 1) {
int seq_id_max = 0;
for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
seq_id_max = std::max(seq_id_max, ubatch.seq_id[i][s]);
}
}
}
++seq_id_max;
LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
std::vector<int8_t> seq_id(seq_id_max);
for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
seq_id[ubatch.seq_id[i][s]] = 1;
}
std::stringstream ss;
for (int s = 0; s < seq_id_max; ++s) {
if (seq_id[s]) {
ss << s%10;
} else {
ss << ".";
}
}
if (ubatch.token) {
LLAMA_LOG_DEBUG("%s: %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
__func__, i, ubatch.token[i], vocab->token_to_piece(ubatch.token[i]).c_str(),
ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
} else {
LLAMA_LOG_DEBUG("%s: %4d: [embd], pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
__func__, i, ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
}
}
LLAMA_LOG_DEBUG("%s: ]\n", __func__);
}
}
} }
// //

View File

@ -2,86 +2,44 @@
#include "llama.h" #include "llama.h"
#include "llama-cparams.h"
#include <array> #include <array>
#include <vector> #include <vector>
#include <set> #include <set>
#include <bitset>
#include <unordered_map>
// very similar to llama_batch, // keep this struct lightweight
// but has more metadata about sequences // it points to data in `llama_batch_allocr`
struct llama_ubatch { struct llama_ubatch {
bool equal_seqs; bool equal_seqs;
// TODO: whole_seqs for embeddings? // TODO: whole_seqs for embeddings?
uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs) uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs)
uint32_t n_seq_tokens; // tokens per sequence uint32_t n_seq_tokens; // tokens per sequence set
uint32_t n_seqs; uint32_t n_seqs; // sequence sets in the ubatch
uint32_t n_seqs_unq; // unique sequence ids in the ubatch
llama_token * token; // [n_tokens] // seq_id_unq: unique sequence ids in the ubatch
float * embd; // [n_embd, n_tokens] // seq_idx: indices of the unique sequence ids in the ubatch in [0, n_seqs_unq)
llama_pos * pos; // [n_tokens] // used for extracting sequence pooled embeddings
int32_t * n_seq_id; // [n_seqs]
llama_seq_id ** seq_id; // [n_seqs] // // size | idx | val
int8_t * output; // [n_tokens] llama_token * token; // [n_tokens] | i | id, token
float * embd; // [n_embd, n_tokens] | i | embd
llama_pos * pos; // [n_tokens] | i | pos
int32_t * n_seq_id; // [n_tokens] | i | -
llama_seq_id ** seq_id; // [n_tokens] | s | s0, s1, seq_id
llama_seq_id * seq_id_unq; // [n_seqs_unq] | s | seq_id
int32_t * seq_idx; // [LLAMA_MAX_SEQ] | - | seq_idx
int8_t * output; // [n_tokens] | i | -
}; };
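
The seq_id_unq / seq_idx pair exists so that per-sequence pooled outputs can be indexed densely. A minimal sketch of the access pattern (mirroring the extraction code in llama-context.cpp further down), assuming a pooled tensor that stores n_embd floats per unique sequence:

    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];    // user-visible sequence id
        const int32_t      seq_idx = ubatch.seq_idx[seq_id];  // dense row index in [0, n_seqs_unq)
        // the pooled embedding of seq_id starts at offset n_embd*seq_idx in the output tensor
    }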
struct llama_sbatch_seq { // a helper for sanitizing, fulfilling and splitting a batch
int32_t n_seq_id;
llama_seq_id * seq_id;
size_t offset;
size_t length;
};
// sequence-length-aware batch splitting
struct llama_sbatch {
// tokens left in this batch
size_t n_tokens;
size_t n_embd;
// sorted indices into the batch
std::vector<int64_t> ids;
// batch indices of the output
std::vector<int64_t> out_ids;
std::vector<llama_sbatch_seq> seq;
const llama_batch * batch = nullptr;
// buffers for the ubatches
// TODO: very hacky, this needs a complete rework
struct ubatch_data {
std::vector<llama_token> token;
std::vector<float> embd;
std::vector<llama_pos> pos;
std::vector<int32_t> n_seq_id;
std::vector<llama_seq_id *> seq_id;
std::vector<int8_t> output;
};
std::vector<ubatch_data> udatas;
llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false);
void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length);
// simple split, unknown number of sequences of unequal lengths
llama_ubatch split_simple(size_t n_ubatch);
// make batches of equal-length sequences
llama_ubatch split_equal(size_t n_ubatch);
// sequence-wise split
llama_ubatch split_seq(size_t n_ubatch);
llama_sbatch() = default;
llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false);
};
// a helper for sanitizing and fulfilling a batch
class llama_batch_allocr { class llama_batch_allocr {
public: public:
llama_batch_allocr(); llama_batch_allocr(uint32_t n_pos_per_embd);
// sanitize and auto-gen missing data in the input batch // sanitize and auto-gen missing data in the input batch
// memory is optional. if provided will be used to check for sequence continuity and to determine the positions // memory is optional. if provided will be used to check for sequence continuity and to determine the positions
@ -89,20 +47,57 @@ public:
const llama_batch & batch_inp, const llama_batch & batch_inp,
const llama_vocab & vocab, const llama_vocab & vocab,
const llama_memory_i * memory, const llama_memory_i * memory,
bool embd_all); uint32_t n_embd,
bool output_all);
const llama_batch & get_batch() const; const llama_batch & get_batch() const;
uint32_t get_n_tokens() const;
uint32_t get_n_outputs() const; uint32_t get_n_outputs() const;
// the array of output indices in the order they were encountered during the ubatch splitting
std::vector<int32_t> & get_out_ids();
// min/max positions of each sequence in the current ubatch
llama_pos seq_pos_min(llama_seq_id seq_id) const; llama_pos seq_pos_min(llama_seq_id seq_id) const;
llama_pos seq_pos_max(llama_seq_id seq_id) const; llama_pos seq_pos_max(llama_seq_id seq_id) const;
// call once before splitting the batch to reset the internal state
void split_reset();
// simple split, unknown number of sequence sets of unequal lengths
llama_ubatch split_simple(uint32_t n_ubatch);
// make ubatches of equal-length sequence sets
llama_ubatch split_equal(uint32_t n_ubatch);
// sequence-set-wise split - each ubatch contains a single sequence-set
llama_ubatch split_seq(uint32_t n_ubatch);
// a helper method for creating a well-defined ubatch of tokens
// TODO: support embeddings if needed in the future
llama_ubatch ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs);
private: private:
void clear(); void clear();
// create the next ubatch based on the provided batch indices (idxs) and the number of sequence sets (n_seqs)
// return llama_ubatch.n_tokens == 0 if the entire batch was consumed
llama_ubatch ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs);
// for debugging, start with LLAMA_BATCH_DEBUG=2
void ubatch_print(const llama_ubatch & ubatch, int debug);
llama_batch batch; llama_batch batch;
// only for debugging purposes
const llama_vocab * vocab;
// TODO: this is more of a temporary solution until we have a better way to handle multiple positions per token/embd
// ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
const uint32_t n_pos_per_embd;
uint32_t n_embd;
uint32_t n_outputs; uint32_t n_outputs;
std::array<llama_seq_id, 1> seq_id_0 = { 0 }; // default sequence id std::array<llama_seq_id, 1> seq_id_0 = { 0 }; // default sequence id
@ -110,10 +105,43 @@ private:
std::vector<llama_pos> pos; std::vector<llama_pos> pos;
std::vector<int32_t> n_seq_id; std::vector<int32_t> n_seq_id;
std::vector<llama_seq_id *> seq_id; std::vector<llama_seq_id *> seq_id;
std::vector<llama_seq_id> seq_id_unq;
std::vector<int32_t> seq_idx;
std::vector<int8_t> output; std::vector<int8_t> output;
std::vector<std::set<llama_pos>> seq_pos; // seq_pos[s]: the set of positions in sequence s using pos_set_t = std::set<llama_pos>;
std::vector<std::vector<bool>> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1 using seq_cpl_t = std::vector<bool>;
std::vector<pos_set_t> seq_pos; // seq_pos[s]: the set of positions in sequence s
std::vector<seq_cpl_t> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1
using idx_vec_t = std::vector<int32_t>;
using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;
std::vector<seq_set_t> seq_set; // seq_set[i]: the sequence set of token i
std::unordered_map<seq_set_t, idx_vec_t> seq_set_map; // the indices at which the sequence set appears
// batch indices of the output
std::vector<int32_t> out_ids;
// used[i] indicates if token i has already been used in a previous ubatch
std::vector<bool> used;
// llama_ubatch points to this data:
struct ubatch {
std::vector<llama_token> token;
std::vector<float> embd;
std::vector<llama_pos> pos;
std::vector<int32_t> n_seq_id;
std::vector<llama_seq_id *> seq_id;
std::vector<llama_seq_id> seq_id_unq;
std::vector<int32_t> seq_idx;
std::vector<int8_t> output;
};
// current splitting state:
std::vector<ubatch> ubatches;
int debug; int debug;
}; };
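
Putting the API together, the intended call sequence is: init() once per logical batch, split_reset(), then one of the split_*() methods in a loop until it returns an empty ubatch. A hedged sketch (error handling and the memory module are simplified; the surrounding variables are assumed to exist):

    llama_batch_allocr balloc(/*n_pos_per_embd=*/ 1);

    if (balloc.init(batch, vocab, /*memory=*/ nullptr, n_embd, /*output_all=*/ false)) {
        balloc.split_reset();

        while (true) {
            llama_ubatch ubatch = balloc.split_simple(n_ubatch);
            if (ubatch.n_tokens == 0) {
                break; // the entire batch has been consumed
            }
            // ... build and run the graph on this ubatch ...
        }
    }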

View File

@ -333,7 +333,7 @@ int32_t llm_chat_apply_template(
std::string role(message->role); std::string role(message->role);
if (role == "system") { if (role == "system") {
// there is no system message for gemma, but we will merge it with user prompt, so nothing is broken // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
system_prompt = trim(message->content); system_prompt += trim(message->content);
continue; continue;
} }
// in gemma, "assistant" is "model" // in gemma, "assistant" is "model"
@ -355,7 +355,7 @@ int32_t llm_chat_apply_template(
std::string role(message->role); std::string role(message->role);
if (role == "system") { if (role == "system") {
// there is no system message support, we will merge it with user prompt // there is no system message support, we will merge it with user prompt
system_prompt = message->content; system_prompt += message->content;
continue; continue;
} else if (role == "user") { } else if (role == "user") {
ss << "Human: "; ss << "Human: ";

View File

@ -20,7 +20,7 @@ llama_context::llama_context(
const llama_model & model, const llama_model & model,
llama_context_params params) : llama_context_params params) :
model(model), model(model),
batch_allocr(std::make_unique<llama_batch_allocr>()) { balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) {
LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__); LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);
t_start_us = model.t_start_us; t_start_us = model.t_start_us;
@ -722,22 +722,26 @@ llm_graph_result_ptr llama_context::process_ubatch(const llama_ubatch & ubatch,
} }
int llama_context::encode(const llama_batch & batch_inp) { int llama_context::encode(const llama_batch & batch_inp) {
GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
if (batch_inp.n_tokens == 0) { if (batch_inp.n_tokens == 0) {
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
return -1; return -1;
} }
const auto & hparams = model.hparams;
const int64_t n_embd = hparams.n_embd;
// note: during encode, we always pass the full sequence starting from pos = 0 // note: during encode, we always pass the full sequence starting from pos = 0
if (!batch_allocr->init(batch_inp, model.vocab, nullptr, true)) { if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, true)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
return -1; return -1;
} }
const llama_batch & batch = batch_allocr->get_batch(); const uint32_t n_tokens = balloc->get_n_tokens();
const uint32_t n_tokens = batch.n_tokens; const llama_ubatch ubatch = balloc->split_simple(n_tokens);
GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
// micro-batching is not possible for non-causal encoding, so we process the batch in a single shot // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens"); GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");
@ -751,14 +755,6 @@ int llama_context::encode(const llama_batch & batch_inp) {
n_queued_tokens += n_tokens; n_queued_tokens += n_tokens;
const auto & hparams = model.hparams;
const int64_t n_embd = hparams.n_embd;
llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true);
const llama_ubatch ubatch = sbatch.split_simple(n_tokens);
// reserve output buffer // reserve output buffer
if (output_reserve(n_tokens) < n_tokens) { if (output_reserve(n_tokens) < n_tokens) {
LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens); LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
@ -817,34 +813,28 @@ int llama_context::encode(const llama_batch & batch_inp) {
{ {
// extract sequence embeddings // extract sequence embeddings
auto & embd_seq_out = embd_seq; auto & embd_seq_out = embd_seq;
embd_seq_out.clear();
GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
const llama_seq_id seq_id = ubatch.seq_id_unq[s];
const int32_t seq_idx = ubatch.seq_idx[seq_id];
// TODO: fix indexing [UBATCH_IDX]
for (uint32_t i = 0; i < n_tokens; i++) {
const llama_seq_id seq_id = ubatch.seq_id[i][0];
if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
continue;
}
embd_seq_out[seq_id].resize(n_embd); embd_seq_out[seq_id].resize(n_embd);
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
} }
} break; } break;
case LLAMA_POOLING_TYPE_RANK: case LLAMA_POOLING_TYPE_RANK:
{ {
// extract the rerank score - n_cls_out floats per sequence // extract the rerank score - n_cls_out floats per sequence
auto & embd_seq_out = embd_seq; auto & embd_seq_out = embd_seq;
const uint32_t n_cls_out = hparams.n_cls_out; const uint32_t n_cls_out = hparams.n_cls_out;
// TODO: fix indexing [UBATCH_IDX] for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { const llama_seq_id seq_id = ubatch.seq_id_unq[s];
const llama_seq_id seq_id = ubatch.seq_id[s][0]; const int32_t seq_idx = ubatch.seq_idx[seq_id];
if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
continue;
}
embd_seq_out[seq_id].resize(n_cls_out); embd_seq_out[seq_id].resize(n_cls_out);
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_id)*sizeof(float), n_cls_out*sizeof(float)); ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
} }
} break; } break;
case LLAMA_POOLING_TYPE_UNSPECIFIED: case LLAMA_POOLING_TYPE_UNSPECIFIED:
@ -869,12 +859,16 @@ int llama_context::encode(const llama_batch & batch_inp) {
cross.v_embd.resize(cross.n_embd*cross.n_enc); cross.v_embd.resize(cross.n_embd*cross.n_enc);
memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd)); memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));
const auto & batch = balloc->get_batch();
// remember the sequence ids used during the encoding - needed for cross attention later // remember the sequence ids used during the encoding - needed for cross attention later
cross.seq_ids_enc.resize(n_tokens); cross.seq_ids_enc.resize(n_tokens);
for (uint32_t i = 0; i < n_tokens; i++) { for (uint32_t i = 0; i < n_tokens; i++) {
cross.seq_ids_enc[i].clear(); cross.seq_ids_enc[i].clear();
for (int s = 0; s < batch.n_seq_id[i]; s++) { for (int s = 0; s < batch.n_seq_id[i]; s++) {
llama_seq_id seq_id = batch.seq_id[i][s]; const llama_seq_id seq_id = batch.seq_id[i][s];
cross.seq_ids_enc[i].insert(seq_id); cross.seq_ids_enc[i].insert(seq_id);
} }
} }
@ -884,6 +878,8 @@ int llama_context::encode(const llama_batch & batch_inp) {
} }
int llama_context::decode(const llama_batch & batch_inp) { int llama_context::decode(const llama_batch & batch_inp) {
GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
if (!memory) { if (!memory) {
LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__); LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
return encode(batch_inp); return encode(batch_inp);
@ -894,29 +890,24 @@ int llama_context::decode(const llama_batch & batch_inp) {
return -1; return -1;
} }
// when computing embeddings, all tokens are output
const bool embd_all = cparams.embeddings;
if (!batch_allocr->init(batch_inp, model.vocab, memory.get(), embd_all)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
return -1;
}
const llama_batch & batch = batch_allocr->get_batch();
const auto & vocab = model.vocab; const auto & vocab = model.vocab;
const auto & hparams = model.hparams; const auto & hparams = model.hparams;
const int32_t n_vocab = vocab.n_tokens(); const int32_t n_vocab = vocab.n_tokens();
const int64_t n_embd = hparams.n_embd; const int64_t n_embd = hparams.n_embd;
const uint32_t n_tokens_all = batch.n_tokens; // when computing embeddings, all tokens are output
const bool output_all = cparams.embeddings;
GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, output_all)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
return -1;
}
const uint32_t n_outputs_all = batch_allocr->get_n_outputs(); const uint32_t n_tokens_all = balloc->get_n_tokens();
const uint32_t n_outputs_all = balloc->get_n_outputs();
if (embd_all) { if (output_all) {
// require that all tokens are output // require that all tokens are output
if (n_outputs_all != n_tokens_all) { if (n_outputs_all != n_tokens_all) {
LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n",
@ -945,7 +936,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
llama_memory_state_ptr mstate; llama_memory_state_ptr mstate;
while (true) { while (true) {
mstate = memory->init_batch(batch, cparams.n_ubatch, embd_all); mstate = memory->init_batch(*balloc, cparams.n_ubatch, output_all);
if (!mstate) { if (!mstate) {
return -2; return -2;
} }
@ -966,19 +957,19 @@ int llama_context::decode(const llama_batch & batch_inp) {
did_optimize = true; did_optimize = true;
if (kv_self_update(true)) { if (kv_self_update(true)) {
LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, batch.n_tokens); LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens());
continue; continue;
} }
} }
LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, batch.n_tokens); LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens());
return 1; return 1;
} }
case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
{ {
LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, batch.n_tokens); LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens());
return -2; return -2;
} }
@ -1005,7 +996,6 @@ int llama_context::decode(const llama_batch & batch_inp) {
if (n_outputs_all == n_tokens_all) { if (n_outputs_all == n_tokens_all) {
n_outputs_new = ubatch.n_tokens; n_outputs_new = ubatch.n_tokens;
} else { } else {
GGML_ASSERT(ubatch.output);
for (uint32_t i = 0; i < ubatch.n_tokens; i++) { for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
n_outputs_new += (int32_t) (ubatch.output[i] != 0); n_outputs_new += (int32_t) (ubatch.output[i] != 0);
} }
@ -1105,27 +1095,27 @@ int llama_context::decode(const llama_batch & batch_inp) {
// extract sequence embeddings (cleared before processing each batch) // extract sequence embeddings (cleared before processing each batch)
auto & embd_seq_out = embd_seq; auto & embd_seq_out = embd_seq;
for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
const llama_seq_id seq_id = ubatch.seq_id[s][0]; const llama_seq_id seq_id = ubatch.seq_id_unq[s];
if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { const int32_t seq_idx = ubatch.seq_idx[seq_id];
continue;
}
embd_seq_out[seq_id].resize(n_embd); embd_seq_out[seq_id].resize(n_embd);
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
} }
} break; } break;
case LLAMA_POOLING_TYPE_RANK: case LLAMA_POOLING_TYPE_RANK:
{ {
// extract the rerank score - a single float per sequence // extract the rerank score - n_cls_out floats per sequence
auto & embd_seq_out = embd_seq; auto & embd_seq_out = embd_seq;
for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { const uint32_t n_cls_out = hparams.n_cls_out;
const llama_seq_id seq_id = ubatch.seq_id[s][0];
if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
continue; const llama_seq_id seq_id = ubatch.seq_id_unq[s];
} const int32_t seq_idx = ubatch.seq_idx[seq_id];
embd_seq_out[seq_id].resize(1);
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float)); embd_seq_out[seq_id].resize(n_cls_out);
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
} }
} break; } break;
case LLAMA_POOLING_TYPE_UNSPECIFIED: case LLAMA_POOLING_TYPE_UNSPECIFIED:
@ -1145,7 +1135,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
if (n_outputs > 0) { if (n_outputs > 0) {
bool sorted_output = true; bool sorted_output = true;
auto & out_ids = mstate->out_ids(); auto & out_ids = balloc->get_out_ids();
GGML_ASSERT(out_ids.size() == (size_t) n_outputs); GGML_ASSERT(out_ids.size() == (size_t) n_outputs);
@ -1318,8 +1308,8 @@ ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, u
this->n_outputs = n_outputs; this->n_outputs = n_outputs;
llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph llama_batch_allocr balloc(model.hparams.n_pos_per_embd());
llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr}; llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs);
auto * gf = graph_init(); auto * gf = graph_init();
auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate); auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT, mstate);
@ -2039,7 +2029,12 @@ void llama_context::opt_epoch_iter(
batch.logits [pos_batch] = true; batch.logits [pos_batch] = true;
} }
const auto n_tokens_all = batch.n_tokens; if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, true)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
return;
}
const uint32_t n_tokens_all = balloc->get_n_tokens();
n_queued_tokens += n_tokens_all; n_queued_tokens += n_tokens_all;
@ -2047,7 +2042,7 @@ void llama_context::opt_epoch_iter(
uint32_t n_outputs_all = n_tokens_all; uint32_t n_outputs_all = n_tokens_all;
auto mstate = memory->init_batch(batch, cparams.n_ubatch, true); auto mstate = memory->init_batch(*balloc, cparams.n_ubatch, true);
if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__);
break; break;


@ -247,7 +247,7 @@ private:
std::map<llama_seq_id, std::vector<float>> embd_seq; std::map<llama_seq_id, std::vector<float>> embd_seq;
// reuse the batch_allocr to avoid unnecessary memory allocations // reuse the batch_allocr to avoid unnecessary memory allocations
std::unique_ptr<llama_batch_allocr> batch_allocr; std::unique_ptr<llama_batch_allocr> balloc;
uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch


@ -6,7 +6,8 @@
#include "llama-kv-cache-unified.h" #include "llama-kv-cache-unified.h"
#include "llama-kv-cache-unified-iswa.h" #include "llama-kv-cache-unified-iswa.h"
#include "llama-kv-cache-recurrent.h" #include "llama-memory-hybrid.h"
#include "llama-memory-recurrent.h"
#include <cassert> #include <cassert>
#include <cmath> #include <cmath>
@ -91,12 +92,8 @@ void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
} }
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { GGML_ASSERT(out_ids);
//GGML_ASSERT(out_ids && "every model that can must skip unused outputs");
if (!out_ids) {
LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__);
} else {
const int64_t n_tokens = ubatch->n_tokens; const int64_t n_tokens = ubatch->n_tokens;
GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
@ -106,150 +103,133 @@ void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
for (int i = 0; i < n_tokens; ++i) { for (int i = 0; i < n_tokens; ++i) {
data[i] = i; data[i] = i;
} }
} else if (ubatch->output) {
int32_t n_outputs = 0; return;
}
GGML_ASSERT(ubatch->output);
int n_outputs = 0;
for (int i = 0; i < n_tokens; ++i) { for (int i = 0; i < n_tokens; ++i) {
if (ubatch->output[i]) { if (ubatch->output[i]) {
data[n_outputs++] = i; data[n_outputs++] = i;
} }
} }
// the graph needs to have been passed the correct number of outputs
GGML_ASSERT(n_outputs == n_outputs);
} else if (n_outputs == 1) {
// only keep last output
data[0] = n_tokens - 1;
} else {
GGML_ASSERT(n_outputs == 0);
}
}
}
} }
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
const int64_t n_tokens = ubatch->n_tokens; const int64_t n_tokens = ubatch->n_tokens;
const int64_t n_seq_tokens = ubatch->n_seq_tokens; const int64_t n_seq_tokens = ubatch->n_seq_tokens;
const int64_t n_seqs = ubatch->n_seqs; const int64_t n_seqs_unq = ubatch->n_seqs_unq;
GGML_ASSERT(mean); GGML_ASSERT(mean);
GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));
float * data = (float *) mean->data; float * data = (float *) mean->data;
memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean)); memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));
std::vector<uint64_t> sum(n_tokens, 0); std::vector<uint64_t> sums(n_seqs_unq, 0);
for (int i = 0; i < n_tokens; i += n_seq_tokens) {
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[i][s];
const int32_t seq_idx = ubatch->seq_idx[seq_id];
// TODO: fix indexing [UBATCH_IDX] sums[seq_idx] += ubatch->n_seq_tokens;
for (int s = 0; s < n_seqs; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[s][0];
// TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
sum[seq_id] += ubatch->n_seq_tokens;
}
std::vector<float> div(n_tokens, 0.0f);
for (int i = 0; i < n_tokens; ++i) {
const uint64_t s = sum[i];
if (s > 0) {
div[i] = 1.0f/float(s);
} }
} }
// TODO: fix indexing [UBATCH_IDX] std::vector<float> div(n_seqs_unq, 0.0f);
for (int s = 0; s < n_seqs; ++s) { for (int s = 0; s < n_seqs_unq; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[s][0]; const uint64_t sum = sums[s];
if (sum > 0) {
div[s] = 1.0f/float(sum);
}
}
for (int i = 0; i < n_seq_tokens; ++i) { for (int i = 0; i < n_tokens; i += n_seq_tokens) {
data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[i][s];
const int32_t seq_idx = ubatch->seq_idx[seq_id];
for (int j = 0; j < n_seq_tokens; ++j) {
data[seq_idx*n_tokens + i + j] = div[seq_idx];
}
} }
} }
} }
} }
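The rewritten MEAN path above builds a [n_tokens, n_seqs_unq] weight matrix whose entry for token i and sequence index s is 1/count(s), so multiplying it with the token embeddings yields per-sequence means. A standalone sketch of that construction, under the simplifying assumption of a single sequence id per token (the real code loops over n_seq_id[i]):

#include <cstdio>
#include <vector>

int main() {
    // one dense sequence index per token, as provided by ubatch.seq_idx in the diff
    const std::vector<int> tok_seq_idx = { 0, 0, 0, 1, 1 }; // 3 tokens of seq 0, 2 of seq 1

    const int n_tokens   = (int) tok_seq_idx.size();
    const int n_seqs_unq = 2;

    // count tokens per sequence, then take the reciprocal
    std::vector<int> sums(n_seqs_unq, 0);
    for (int i = 0; i < n_tokens; ++i) {
        sums[tok_seq_idx[i]]++;
    }
    std::vector<float> div(n_seqs_unq, 0.0f);
    for (int s = 0; s < n_seqs_unq; ++s) {
        if (sums[s] > 0) {
            div[s] = 1.0f/float(sums[s]);
        }
    }

    // mean matrix: row s holds 1/count(s) at the columns of the tokens belonging to s
    std::vector<float> mean((size_t) n_tokens*n_seqs_unq, 0.0f);
    for (int i = 0; i < n_tokens; ++i) {
        const int s = tok_seq_idx[i];
        mean[(size_t) s*n_tokens + i] = div[s];
    }

    for (int s = 0; s < n_seqs_unq; ++s) {
        for (int i = 0; i < n_tokens; ++i) {
            printf("%.3f ", mean[(size_t) s*n_tokens + i]);
        }
        printf("\n");
    }
    return 0;
}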
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
if (cparams.embeddings && (
cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
const int64_t n_tokens = ubatch->n_tokens; const int64_t n_tokens = ubatch->n_tokens;
const int64_t n_seq_tokens = ubatch->n_seq_tokens; const int64_t n_seq_tokens = ubatch->n_seq_tokens;
const int64_t n_seqs = ubatch->n_seqs; const int64_t n_seqs_unq = ubatch->n_seqs_unq;
if (cparams.embeddings && (
cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
cparams.pooling_type == LLAMA_POOLING_TYPE_RANK
)) {
GGML_ASSERT(cls); GGML_ASSERT(cls);
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
uint32_t * data = (uint32_t *) cls->data; uint32_t * data = (uint32_t *) cls->data;
memset(cls->data, 0, n_tokens * ggml_element_size(cls)); memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
// TODO: fix indexing [UBATCH_IDX] for (int i = 0; i < n_tokens; i += n_seq_tokens) {
for (int s = 0; s < n_seqs; ++s) { for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[s][0]; const llama_seq_id seq_id = ubatch->seq_id[i][s];
const int32_t seq_idx = ubatch->seq_idx[seq_id];
// TODO: adapt limits to n_seqs when ubatch->equal_seqs is true data[seq_idx] = i;
GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");
for (int i = 0; i < n_seq_tokens; ++i) {
const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];
if (pos == 0) {
data[seq_id] = s*n_seq_tokens + i;
}
} }
} }
} }
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
const int64_t n_tokens = ubatch->n_tokens;
const int64_t n_seq_tokens = ubatch->n_seq_tokens;
const int64_t n_seqs = ubatch->n_seqs;
GGML_ASSERT(cls); GGML_ASSERT(cls);
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
uint32_t * data = (uint32_t *) cls->data; uint32_t * data = (uint32_t *) cls->data;
memset(cls->data, 0, n_tokens * ggml_element_size(cls)); memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
std::vector<int> last_pos(n_tokens, -1); std::vector<int> last_pos(n_seqs_unq, -1);
std::vector<int> last_row(n_tokens, -1); std::vector<int> last_row(n_seqs_unq, -1);
// TODO: fix indexing [UBATCH_IDX]
for (int s = 0; s < n_seqs; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[s][0];
// TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");
for (int i = 0; i < n_seq_tokens; ++i) {
const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];
if (pos >= last_pos[seq_id]) {
last_pos[seq_id] = pos;
last_row[seq_id] = s*n_seq_tokens + i;
}
}
}
for (int i = 0; i < n_tokens; ++i) { for (int i = 0; i < n_tokens; ++i) {
if (last_row[i] >= 0) { const llama_pos pos = ubatch->pos[i];
data[i] = last_row[i];
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[i][s];
const int32_t seq_idx = ubatch->seq_idx[seq_id];
if (pos >= last_pos[seq_idx]) {
last_pos[seq_idx] = pos;
last_row[seq_idx] = i;
}
}
}
for (int s = 0; s < n_seqs_unq; ++s) {
if (last_row[s] >= 0) {
data[s] = last_row[s];
} }
} }
} }
} }
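For CLS/RANK the new code simply records the first row of each sequence chunk, while LAST keeps the row with the highest position seen for each sequence index. A small self-contained sketch of the LAST selection, again assuming one sequence id per token for brevity:

#include <cstdio>
#include <vector>

int main() {
    // per-token position and dense sequence index, as in a simplified ubatch
    const std::vector<int> pos         = { 0, 1, 2, 0, 1 };
    const std::vector<int> tok_seq_idx = { 0, 0, 0, 1, 1 };

    const int n_tokens   = (int) pos.size();
    const int n_seqs_unq = 2;

    std::vector<int> last_pos(n_seqs_unq, -1);
    std::vector<int> last_row(n_seqs_unq, -1);

    // keep the row index of the highest position seen for each sequence
    for (int i = 0; i < n_tokens; ++i) {
        const int s = tok_seq_idx[i];
        if (pos[i] >= last_pos[s]) {
            last_pos[s] = pos[i];
            last_row[s] = i;
        }
    }

    for (int s = 0; s < n_seqs_unq; ++s) {
        printf("seq_idx %d -> row %d\n", s, last_row[s]); // expect rows 2 and 4
    }
    return 0;
}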
void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
GGML_UNUSED(ubatch); GGML_UNUSED(ubatch);
const int64_t n_kv = kv_state->get_n_kv(); const int64_t n_rs = mem_state->get_n_rs();
if (s_copy) { if (s_copy) {
GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
int32_t * data = (int32_t *) s_copy->data; int32_t * data = (int32_t *) s_copy->data;
// assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
for (uint32_t i = 0; i < n_kv; ++i) { for (uint32_t i = 0; i < n_rs; ++i) {
data[i] = kv_state->s_copy(i); data[i] = mem_state->s_copy(i);
} }
} }
} }
@ -265,33 +245,28 @@ void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
} }
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
if (kq_mask) {
if (cparams.causal_attn) {
const int64_t n_kv = ubatch->n_tokens; const int64_t n_kv = ubatch->n_tokens;
const int64_t n_tokens = ubatch->n_tokens; const int64_t n_tokens = ubatch->n_tokens;
const int64_t n_seq_tokens = ubatch->n_seq_tokens;
const int64_t n_seqs = ubatch->n_seqs;
GGML_ASSERT(kq_mask);
GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
float * data = (float *) kq_mask->data; float * data = (float *) kq_mask->data;
for (int h = 0; h < 1; ++h) { for (int h = 0; h < 1; ++h) {
for (int s1 = 0; s1 < n_seqs; ++s1) { for (int i1 = 0; i1 < n_tokens; ++i1) {
const llama_seq_id seq_id = ubatch->seq_id[s1][0]; const llama_seq_id s1 = ubatch->seq_id[i1][0];
for (int j = 0; j < n_seq_tokens; ++j) { for (int i0 = 0; i0 < n_tokens; ++i0) {
const int32_t tj = s1*n_seq_tokens + j;
for (int s0 = 0; s0 < n_seqs; ++s0) {
for (int i = 0; i < n_seq_tokens; ++i) {
const int32_t ti = s0*n_seq_tokens + i;
float f = -INFINITY; float f = -INFINITY;
// TODO: fix indexing [UBATCH_IDX] for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) {
for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { const llama_seq_id s0 = ubatch->seq_id[i0][0];
if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) {
// TODO: reimplement this like in llama_kv_cache_unified
if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) {
if (hparams.use_alibi) { if (hparams.use_alibi) {
f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]);
} else { } else {
f = 0.0f; f = 0.0f;
} }
@ -299,55 +274,7 @@ void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
} }
} }
data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f; data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f;
}
}
}
}
}
} else {
const int64_t n_tokens = ubatch->n_tokens;
const int64_t n_seq_tokens = ubatch->n_seq_tokens;
const int64_t n_seqs = ubatch->n_seqs;
const int64_t n_stride = ubatch->n_tokens;
GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
float * data = (float *) kq_mask->data;
for (int h = 0; h < 1; ++h) {
for (int s1 = 0; s1 < n_seqs; ++s1) {
const llama_seq_id seq_id = ubatch->seq_id[s1][0];
for (int j = 0; j < n_seq_tokens; ++j) {
const int32_t tj = s1*n_seq_tokens + j;
for (int s0 = 0; s0 < n_seqs; ++s0) {
for (int i = 0; i < n_seq_tokens; ++i) {
const int32_t ti = s0*n_seq_tokens + i;
float f = -INFINITY;
// TODO: fix indexing [UBATCH_IDX]
for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
if (ubatch->seq_id[s0][s] == seq_id) {
if (hparams.use_alibi) {
f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
} else {
f = 0.0f;
}
break;
}
}
data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
}
}
for (int i = n_tokens; i < n_stride; ++i) {
data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
}
}
}
} }
} }
} }
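The collapsed mask code above now covers causal and non-causal attention in a single pass: key token i0 is visible to query token i1 only if they share a sequence and, when causal_attn is set, pos[i0] <= pos[i1]; visible pairs get 0.0f (or -|delta pos| with ALiBi), everything else stays -INFINITY. A self-contained sketch of the same rule on plain arrays, with a single sequence id per token for brevity:

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const bool causal_attn = true;
    const bool use_alibi   = false;

    const std::vector<int> pos    = { 0, 1, 0, 1 };
    const std::vector<int> seq_id = { 0, 0, 1, 1 };
    const int n_tokens = (int) pos.size();
    const int n_kv     = n_tokens; // no KV cache: the keys are the batch tokens themselves

    std::vector<float> mask((size_t) n_tokens*n_kv, -INFINITY);

    for (int i1 = 0; i1 < n_tokens; ++i1) {      // query token
        for (int i0 = 0; i0 < n_kv; ++i0) {      // key token
            const bool same_seq = seq_id[i0] == seq_id[i1];
            const bool visible  = !causal_attn || pos[i0] <= pos[i1];
            if (same_seq && visible) {
                mask[(size_t) i1*n_kv + i0] = use_alibi ? -std::abs(float(pos[i0] - pos[i1])) : 0.0f;
            }
        }
    }

    for (int i1 = 0; i1 < n_tokens; ++i1) {
        for (int i0 = 0; i0 < n_kv; ++i0) {
            printf("%6.1f ", mask[(size_t) i1*n_kv + i0]);
        }
        printf("\n");
    }
    return 0;
}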
@ -370,7 +297,8 @@ void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch
} }
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
if (cross_kq_mask) { GGML_ASSERT(cross_kq_mask);
const int64_t n_enc = cross_kq_mask->ne[0]; const int64_t n_enc = cross_kq_mask->ne[0];
const int64_t n_tokens = ubatch->n_tokens; const int64_t n_tokens = ubatch->n_tokens;
@ -380,17 +308,19 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
float * data = (float *) cross_kq_mask->data; float * data = (float *) cross_kq_mask->data;
for (int h = 0; h < 1; ++h) { for (int h = 0; h < 1; ++h) {
for (int j = 0; j < n_tokens; ++j) { for (int i = 0; i < n_tokens; ++i) {
for (int i = 0; i < n_enc; ++i) { for (int j = 0; j < n_enc; ++j) {
float f = -INFINITY; float f = -INFINITY;
// TODO: fix indexing [UBATCH_IDX]
for (int s = 0; s < ubatch->n_seq_id[j]; ++s) { for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
const llama_seq_id seq_id = ubatch->seq_id[j][s]; const llama_seq_id seq_id = ubatch->seq_id[i][s];
if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) {
if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
f = 0.0f; f = 0.0f;
} }
} }
data[h*(n_enc*n_tokens) + j*n_enc + i] = f;
data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
} }
} }
@ -401,6 +331,23 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
} }
} }
} }
void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
if (self_kq_mask) {
mem_state->get_state_attn()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
}
const int64_t n_rs = mem_state->get_state_recr()->get_n_rs();
if (s_copy) {
GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
int32_t * data = (int32_t *) s_copy->data;
// assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
for (uint32_t i = 0; i < n_rs; ++i) {
data[i] = mem_state->get_state_recr()->s_copy(i);
}
}
} }
// //
@ -448,10 +395,6 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) :
res (std::make_unique<llm_graph_result>()) { res (std::make_unique<llm_graph_result>()) {
} }
int64_t llm_graph_context::n_pos_per_embd() const {
return hparams.rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
if (cb_func) { if (cb_func) {
cb_func(ubatch, cur, name, il); cb_func(ubatch, cur, name, il);
@ -896,11 +839,11 @@ ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
} }
ggml_tensor * llm_graph_context::build_inp_pos() const { ggml_tensor * llm_graph_context::build_inp_pos() const {
auto inp = std::make_unique<llm_graph_input_pos>(n_pos_per_embd()); auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());
auto & cur = inp->pos; auto & cur = inp->pos;
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_embd()); cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
ggml_set_input(cur); ggml_set_input(cur);
res->add_input(std::move(inp)); res->add_input(std::move(inp));
@ -923,6 +866,14 @@ ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
} }
ggml_tensor * llm_graph_context::build_inp_out_ids() const { ggml_tensor * llm_graph_context::build_inp_out_ids() const {
// note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
// but this would make the graph topology depend on the number of output tokens, which can interfere with
// features that require constant topology such as pipeline parallelism
// ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
//if (n_outputs < n_tokens) {
// return nullptr;
//}
auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs); auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
auto & cur = inp->out_ids; auto & cur = inp->out_ids;
@ -940,7 +891,7 @@ ggml_tensor * llm_graph_context::build_inp_mean() const {
auto & cur = inp->mean; auto & cur = inp->mean;
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
ggml_set_input(cur); ggml_set_input(cur);
res->add_input(std::move(inp)); res->add_input(std::move(inp));
@ -953,24 +904,7 @@ ggml_tensor * llm_graph_context::build_inp_cls() const {
auto & cur = inp->cls; auto & cur = inp->cls;
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
ggml_set_input(cur);
res->add_input(std::move(inp));
return cur;
}
ggml_tensor * llm_graph_context::build_inp_s_copy() const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
auto inp = std::make_unique<llm_graph_input_s_copy>(kv_state);
const auto n_kv = kv_state->get_n_kv();
auto & cur = inp->s_copy;
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv);
ggml_set_input(cur); ggml_set_input(cur);
res->add_input(std::move(inp)); res->add_input(std::move(inp));
@ -1047,6 +981,33 @@ ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_t
return pos_bias; return pos_bias;
} }
llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
const auto * mem_state = static_cast<const llama_memory_hybrid_state *>(mstate);
auto inp = std::make_unique<llm_graph_input_mem_hybrid>(hparams, cparams, mem_state);
{
GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Hybrid recurrent is not supported with SWA attention layers");
const auto n_kv = inp->mem_state->get_state_attn()->get_n_kv();
inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
//cb(inp->self_kq_mask, "KQ_mask", -1);
ggml_set_input(inp->self_kq_mask);
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
}
{
const auto n_rs = mem_state->get_state_recr()->get_n_rs();
inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
ggml_set_input(inp->s_copy);
}
return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn_mha( ggml_tensor * llm_graph_context::build_attn_mha(
ggml_cgraph * gf, ggml_cgraph * gf,
ggml_tensor * q, ggml_tensor * q,
@ -1291,36 +1252,6 @@ ggml_tensor * llm_graph_context::build_attn(
return cur; return cur;
} }
llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
const auto * kv_state = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate);
auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_state);
{
const auto n_kv = kv_state->get_base()->get_n_kv();
inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
//cb(inp->self_kq_mask, "KQ_mask", -1);
ggml_set_input(inp->self_kq_mask);
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
}
{
GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA");
const auto n_kv = kv_state->get_swa()->get_n_kv();
inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
//cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1);
ggml_set_input(inp->self_kq_mask_swa);
inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
}
return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_attn( ggml_tensor * llm_graph_context::build_attn(
llm_graph_input_attn_kv_unified_iswa * inp, llm_graph_input_attn_kv_unified_iswa * inp,
ggml_cgraph * gf, ggml_cgraph * gf,
@ -1430,20 +1361,99 @@ ggml_tensor * llm_graph_context::build_attn(
return cur; return cur;
} }
ggml_tensor * llm_graph_context::build_recurrent_state( ggml_tensor * llm_graph_context::build_attn(
llm_graph_input_mem_hybrid * inp,
ggml_cgraph * gf,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur,
ggml_tensor * k_cur,
ggml_tensor * v_cur,
ggml_tensor * kq_b,
ggml_tensor * v_mla,
float kq_scale,
int il) const {
// these nodes are added to the graph together so that they are not reordered
// by doing so, the number of splits in the graph is reduced
ggml_build_forward_expand(gf, q_cur);
ggml_build_forward_expand(gf, k_cur);
ggml_build_forward_expand(gf, v_cur);
const auto * kv_state = static_cast<const llama_memory_hybrid_state *>(mstate)->get_state_attn();
// store to KV cache
{
ggml_build_forward_expand(gf, kv_state->cpy_k(ctx0, k_cur, il));
ggml_build_forward_expand(gf, kv_state->cpy_v(ctx0, v_cur, il));
}
const auto & kq_mask = inp->get_kq_mask();
ggml_tensor * q = q_cur;
ggml_tensor * k = kv_state->get_k(ctx0, il);
ggml_tensor * v = kv_state->get_v(ctx0, il);
ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, kq_scale);
cb(cur, "kqv_out", il);
if (wo) {
cur = build_lora_mm(wo, cur);
if (arch == LLM_ARCH_GLM4) {
// GLM4 seems to have numerical issues with half-precision accumulators
ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
}
}
if (wo_b) {
cur = ggml_add(ctx0, cur, wo_b);
}
return cur;
}
llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
const auto * kv_state = static_cast<const llama_kv_cache_unified_iswa_state *>(mstate);
auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, kv_state);
{
const auto n_kv = kv_state->get_base()->get_n_kv();
inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
//cb(inp->self_kq_mask, "KQ_mask", -1);
ggml_set_input(inp->self_kq_mask);
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
}
{
GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA");
const auto n_kv = kv_state->get_swa()->get_n_kv();
inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
//cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1);
ggml_set_input(inp->self_kq_mask_swa);
inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
}
return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_rs(
ggml_cgraph * gf, ggml_cgraph * gf,
ggml_tensor * s, ggml_tensor * s,
ggml_tensor * state_copy, ggml_tensor * state_copy,
int32_t state_size, int32_t state_size,
int32_t n_seqs, int32_t n_seqs,
uint32_t n_kv,
uint32_t kv_head,
uint32_t kv_size,
int32_t rs_zero,
bool avoid_copies) const { bool avoid_copies) const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
const auto n_kv = kv_state->get_n_kv(); ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_size);
const auto kv_head = kv_state->get_head();
const auto rs_zero = kv_state->get_rs_z();
ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_state->get_size());
// Clear a single state which will then be copied to the other cleared states. // Clear a single state which will then be copied to the other cleared states.
// Note that this is a no-op when the view is zero-sized. // Note that this is a no-op when the view is zero-sized.
@ -1474,22 +1484,59 @@ ggml_tensor * llm_graph_context::build_recurrent_state(
return output_states; return output_states;
} }
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( llm_graph_input_rs * llm_graph_context::build_rs_inp() const {
const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
auto inp = std::make_unique<llm_graph_input_rs>(kv_state);
const auto n_rs = kv_state->get_n_rs();
inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
ggml_set_input(inp->s_copy);
return (llm_graph_input_rs *) res->add_input(std::move(inp));
}
ggml_tensor * llm_graph_context::build_rs(
llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * s,
int32_t state_size,
int32_t n_seqs,
bool avoid_copies) const {
const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies);
}
ggml_tensor * llm_graph_context::build_rs(
llm_graph_input_mem_hybrid * inp,
ggml_cgraph * gf,
ggml_tensor * s,
int32_t state_size,
int32_t n_seqs,
bool avoid_copies) const {
const auto * kv_state = static_cast<const llama_memory_hybrid_state *>(mstate)->get_state_recr();
return build_rs(gf, s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), avoid_copies);
}
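Both build_rs() overloads feed the same per-cell copy map: inp->s_copy[i] names the cell whose recurrent state should be gathered into slot i before the layer runs (the tensor version does this with a ggml_get_rows-style gather and also clears the rs_zero cell). A toy, tensor-free sketch of that gather; the indices below are made up for illustration:

#include <cstdio>
#include <vector>

int main() {
    const int state_size = 3;
    const int n_rs       = 4;

    // one recurrent state per cell, flattened as [state_size, n_rs]
    std::vector<float> states = {
        1, 1, 1,   // cell 0
        2, 2, 2,   // cell 1
        3, 3, 3,   // cell 2
        4, 4, 4,   // cell 3
    };

    // s_copy[i] = which cell slot i should read its state from
    // (identity where nothing moved; here cell 2 was forked from cell 0)
    const std::vector<int> s_copy = { 0, 1, 0, 3 };

    std::vector<float> gathered((size_t) state_size*n_rs);
    for (int i = 0; i < n_rs; ++i) {
        for (int k = 0; k < state_size; ++k) {
            gathered[(size_t) i*state_size + k] = states[(size_t) s_copy[i]*state_size + k];
        }
    }

    for (int i = 0; i < n_rs; ++i) {
        printf("cell %d <- cell %d : %g %g %g\n", i, s_copy[i],
               gathered[i*state_size + 0], gathered[i*state_size + 1], gathered[i*state_size + 2]);
    }
    return 0;
}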
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
llm_graph_input_rs * inp,
ggml_cgraph * gf, ggml_cgraph * gf,
ggml_tensor * state_copy,
const llama_ubatch & ubatch, const llama_ubatch & ubatch,
int il) const { int il) const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto token_shift_count = hparams.token_shift_count; const auto token_shift_count = hparams.token_shift_count;
const int64_t n_seqs = ubatch.n_seqs; const int64_t n_seqs = ubatch.n_seqs;
ggml_tensor * token_shift_all = kv_state->get_k_l(il); ggml_tensor * token_shift_all = kv_state->get_r_l(il);
ggml_tensor * token_shift = build_recurrent_state( ggml_tensor * token_shift = build_rs(
gf, token_shift_all, state_copy, inp, gf, token_shift_all,
hparams.n_embd_k_s(), n_seqs); hparams.n_embd_r(), n_seqs);
token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);
@ -1500,7 +1547,7 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
ggml_tensor * token_shift, ggml_tensor * token_shift,
const llama_ubatch & ubatch, const llama_ubatch & ubatch,
int il) const { int il) const {
const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate); const auto * kv_state = static_cast<const llama_memory_recurrent_state *>(mstate);
const auto token_shift_count = hparams.token_shift_count; const auto token_shift_count = hparams.token_shift_count;
const auto n_embd = hparams.n_embd; const auto n_embd = hparams.n_embd;
@ -1512,7 +1559,7 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
return ggml_cpy( return ggml_cpy(
ctx0, ctx0,
ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
ggml_view_1d(ctx0, kv_state->get_k_l(il), hparams.n_embd_k_s()*n_seqs, hparams.n_embd_k_s()*kv_head*ggml_element_size(kv_state->get_k_l(il))) ggml_view_1d(ctx0, kv_state->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(kv_state->get_r_l(il)))
); );
} }


@ -21,7 +21,8 @@ struct llama_memory_state_i;
class llama_kv_cache_unified_state; class llama_kv_cache_unified_state;
class llama_kv_cache_unified_iswa_state; class llama_kv_cache_unified_iswa_state;
class llama_kv_cache_recurrent_state; class llama_memory_recurrent_state;
class llama_memory_hybrid_state;
// certain models (typically multi-modal) can produce different types of graphs // certain models (typically multi-modal) can produce different types of graphs
enum llm_graph_type { enum llm_graph_type {
@ -94,14 +95,14 @@ public:
class llm_graph_input_pos : public llm_graph_input_i { class llm_graph_input_pos : public llm_graph_input_i {
public: public:
llm_graph_input_pos(int64_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {} llm_graph_input_pos(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
virtual ~llm_graph_input_pos() = default; virtual ~llm_graph_input_pos() = default;
void set_input(const llama_ubatch * ubatch) override; void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * pos = nullptr; // I32 [n_batch] ggml_tensor * pos = nullptr; // I32 [n_batch]
const int64_t n_pos_per_embd = 1; const uint32_t n_pos_per_embd = 1;
}; };
// temperature tuning, used by llama4 // temperature tuning, used by llama4
@ -188,16 +189,16 @@ public:
const llama_cparams & cparams; const llama_cparams & cparams;
}; };
class llm_graph_input_s_copy : public llm_graph_input_i { class llm_graph_input_rs : public llm_graph_input_i {
public: public:
llm_graph_input_s_copy(const llama_kv_cache_recurrent_state * kv_state) : kv_state(kv_state) {} llm_graph_input_rs(const llama_memory_recurrent_state * mem_state) : mem_state(mem_state) {}
virtual ~llm_graph_input_s_copy() = default; virtual ~llm_graph_input_rs() = default;
void set_input(const llama_ubatch * ubatch) override; void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * s_copy; // I32 [kv_size] ggml_tensor * s_copy; // I32 [kv_size]
const llama_kv_cache_recurrent_state * kv_state; const llama_memory_recurrent_state * mem_state;
}; };
class llm_graph_input_cross_embd : public llm_graph_input_i { class llm_graph_input_cross_embd : public llm_graph_input_i {
@ -300,6 +301,33 @@ public:
const llama_cross * cross = nullptr; const llama_cross * cross = nullptr;
}; };
class llm_graph_input_mem_hybrid : public llm_graph_input_i {
public:
llm_graph_input_mem_hybrid(
const llama_hparams & hparams,
const llama_cparams & cparams,
const llama_memory_hybrid_state * mem_state) :
hparams(hparams),
cparams(cparams),
mem_state(mem_state) {
}
virtual ~llm_graph_input_mem_hybrid() = default;
void set_input(const llama_ubatch * ubatch) override;
ggml_tensor * s_copy; // I32 [kv_size]
ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; }
ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch]
ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch]
const llama_hparams & hparams;
const llama_cparams & cparams;
const llama_memory_hybrid_state * mem_state;
};
// //
// llm_graph_result // llm_graph_result
// //
@ -436,8 +464,6 @@ struct llm_graph_context {
llm_graph_context(const llm_graph_params & params); llm_graph_context(const llm_graph_params & params);
int64_t n_pos_per_embd() const;
void cb(ggml_tensor * cur, const char * name, int il) const; void cb(ggml_tensor * cur, const char * name, int il) const;
// //
@ -508,13 +534,14 @@ struct llm_graph_context {
ggml_tensor * build_inp_out_ids() const; ggml_tensor * build_inp_out_ids() const;
ggml_tensor * build_inp_mean() const; ggml_tensor * build_inp_mean() const;
ggml_tensor * build_inp_cls() const; ggml_tensor * build_inp_cls() const;
ggml_tensor * build_inp_s_copy() const;
ggml_tensor * build_inp_cross_embd() const; ggml_tensor * build_inp_cross_embd() const;
ggml_tensor * build_inp_pos_bucket_enc() const; ggml_tensor * build_inp_pos_bucket_enc() const;
ggml_tensor * build_inp_pos_bucket_dec() const; ggml_tensor * build_inp_pos_bucket_dec() const;
ggml_tensor * build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const; ggml_tensor * build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const;
llm_graph_input_mem_hybrid * build_inp_mem_hybrid() const;
// //
// attention // attention
// //
@ -589,21 +616,61 @@ struct llm_graph_context {
float kq_scale, float kq_scale,
int il) const; int il) const;
ggml_tensor * build_attn(
llm_graph_input_mem_hybrid * inp,
ggml_cgraph * gf,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens]
ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens]
ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens]
ggml_tensor * kq_b,
ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v]
float kq_scale,
int il) const;
// //
// recurrent // recurrent
// //
ggml_tensor * build_recurrent_state( // TODO: avoid notion of "kv"
// TODO: move this implementation to llama_memory_recurrent.
// this is analogous to llama_kv_cache_unified::cpy_k / cpy_v
// when moving, avoid passing `ggml_cgraph` - only pass `ggml_context`. would likely need to split the
// implementation in 2 separate methods. the goal is to avoid calling `ggml_build_forward_expand` in
// `llama_memory_recurrent`
ggml_tensor * build_rs(
ggml_cgraph * gf, ggml_cgraph * gf,
ggml_tensor * s, ggml_tensor * s,
ggml_tensor * state_copy, ggml_tensor * state_copy,
int32_t state_size,
int32_t n_seqs,
uint32_t n_kv,
uint32_t kv_head,
uint32_t kv_size,
int32_t rs_zero,
bool avoid_copies = false) const;
llm_graph_input_rs * build_rs_inp() const;
ggml_tensor * build_rs(
llm_graph_input_rs * inp,
ggml_cgraph * gf,
ggml_tensor * s,
int32_t state_size,
int32_t n_seqs,
bool avoid_copies = false) const;
ggml_tensor * build_rs(
llm_graph_input_mem_hybrid * inp,
ggml_cgraph * gf,
ggml_tensor * s,
int32_t state_size, int32_t state_size,
int32_t n_seqs, int32_t n_seqs,
bool avoid_copies = false) const; bool avoid_copies = false) const;
ggml_tensor * build_rwkv_token_shift_load( ggml_tensor * build_rwkv_token_shift_load(
llm_graph_input_rs * inp,
ggml_cgraph * gf, ggml_cgraph * gf,
ggml_tensor * state_copy,
const llama_ubatch & ubatch, const llama_ubatch & ubatch,
int il) const; int il) const;


@ -65,7 +65,7 @@ uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
return n_embd_head_v * n_head_kv; return n_embd_head_v * n_head_kv;
} }
uint32_t llama_hparams::n_embd_k_s() const { uint32_t llama_hparams::n_embd_r() const {
if (wkv_head_size != 0) { if (wkv_head_size != 0) {
// for RWKV models // for RWKV models
return token_shift_count * n_embd; return token_shift_count * n_embd;
@ -76,7 +76,7 @@ uint32_t llama_hparams::n_embd_k_s() const {
return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
} }
uint32_t llama_hparams::n_embd_v_s() const { uint32_t llama_hparams::n_embd_s() const {
if (wkv_head_size != 0) { if (wkv_head_size != 0) {
// corresponds to RWKV's wkv_states size // corresponds to RWKV's wkv_states size
return n_embd * wkv_head_size; return n_embd * wkv_head_size;
@ -86,6 +86,14 @@ uint32_t llama_hparams::n_embd_v_s() const {
return ssm_d_state * ssm_d_inner; return ssm_d_state * ssm_d_inner;
} }
bool llama_hparams::is_recurrent(uint32_t il) const {
return recurrent_layer_arr[il];
}
uint32_t llama_hparams::n_pos_per_embd() const {
return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
}
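The renamed helpers make the per-layer recurrent state sizes explicit: n_embd_r() is the rolling state (Mamba's conv window or RWKV's token shift) and n_embd_s() is the recurrent state proper (SSM state or wkv state), while n_pos_per_embd() moves into the hparams and returns 4 for M-RoPE, 1 otherwise. A sketch that only evaluates those formulas; the hyperparameter values are hypothetical and not taken from any real model:

#include <cstdio>

int main() {
    // Mamba-like config
    const unsigned ssm_d_conv  = 4;
    const unsigned ssm_d_inner = 1536;
    const unsigned ssm_d_state = 16;
    printf("mamba-like : n_embd_r = %u, n_embd_s = %u\n",
           (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner,  // (d_conv - 1) * d_inner
           ssm_d_state * ssm_d_inner);                           // d_state * d_inner

    // RWKV-like config
    const unsigned n_embd            = 2048;
    const unsigned wkv_head_size     = 64;
    const unsigned token_shift_count = 2;
    printf("rwkv-like  : n_embd_r = %u, n_embd_s = %u\n",
           token_shift_count * n_embd,   // token-shift states
           n_embd * wkv_head_size);      // wkv states
    return 0;
}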
bool llama_hparams::is_swa(uint32_t il) const { bool llama_hparams::is_swa(uint32_t il) const {
if (il < n_layer) { if (il < n_layer) {
return swa_layers[il]; return swa_layers[il];


@ -115,6 +115,9 @@ struct llama_hparams {
uint32_t ssm_d_state = 0; uint32_t ssm_d_state = 0;
uint32_t ssm_dt_rank = 0; uint32_t ssm_dt_rank = 0;
// for hybrid state space models
std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;
bool ssm_dt_b_c_rms = false; bool ssm_dt_b_c_rms = false;
float f_clamp_kqv = 0.0f; float f_clamp_kqv = 0.0f;
@ -181,10 +184,15 @@ struct llama_hparams {
// dimension of the rolling state embeddings // dimension of the rolling state embeddings
// corresponds to Mamba's conv_states size or RWKV's token_shift states size // corresponds to Mamba's conv_states size or RWKV's token_shift states size
uint32_t n_embd_k_s() const; uint32_t n_embd_r() const;
// dimension of the recurrent state embeddings // dimension of the recurrent state embeddings
uint32_t n_embd_v_s() const; uint32_t n_embd_s() const;
// whether or not the given layer is recurrent (for hybrid models)
bool is_recurrent(uint32_t il) const;
uint32_t n_pos_per_embd() const;
bool is_swa(uint32_t il) const; bool is_swa(uint32_t il) const;
}; };


@ -95,19 +95,22 @@ llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const {
return kv_swa->seq_pos_max(seq_id); return kv_swa->seq_pos_max(seq_id);
} }
llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) { llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
GGML_UNUSED(embd_all); GGML_UNUSED(embd_all);
// first try simple split // first try simple split
do { do {
auto sbatch = llama_sbatch(batch, hparams.n_embd, true); balloc.split_reset();
std::vector<llama_ubatch> ubatches; std::vector<llama_ubatch> ubatches;
while (true) {
auto ubatch = balloc.split_simple(n_ubatch);
while (sbatch.n_tokens > 0) { if (ubatch.n_tokens == 0) {
auto ubatch = sbatch.split_simple(n_ubatch); break;
}
ubatches.push_back(ubatch); ubatches.push_back(std::move(ubatch)); // NOLINT
} }
auto heads_base = kv_base->prepare(ubatches); auto heads_base = kv_base->prepare(ubatches);
@ -123,19 +126,22 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch
assert(heads_base.size() == heads_swa.size()); assert(heads_base.size() == heads_swa.size());
return std::make_unique<llama_kv_cache_unified_iswa_state>( return std::make_unique<llama_kv_cache_unified_iswa_state>(
this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); this, std::move(heads_base), std::move(heads_swa), std::move(ubatches));
} while (false); } while (false);
// if it fails, try equal split // if it fails, try equal split
do { do {
auto sbatch = llama_sbatch(batch, hparams.n_embd, false); balloc.split_reset();
std::vector<llama_ubatch> ubatches; std::vector<llama_ubatch> ubatches;
while (true) {
auto ubatch = balloc.split_equal(n_ubatch);
while (sbatch.n_tokens > 0) { if (ubatch.n_tokens == 0) {
auto ubatch = sbatch.split_equal(n_ubatch); break;
}
ubatches.push_back(ubatch); ubatches.push_back(std::move(ubatch)); // NOLINT
} }
auto heads_base = kv_base->prepare(ubatches); auto heads_base = kv_base->prepare(ubatches);
@ -151,7 +157,7 @@ llama_memory_state_ptr llama_kv_cache_unified_iswa::init_batch(const llama_batch
assert(heads_base.size() == heads_swa.size()); assert(heads_base.size() == heads_swa.size());
return std::make_unique<llama_kv_cache_unified_iswa_state>( return std::make_unique<llama_kv_cache_unified_iswa_state>(
this, std::move(sbatch), std::move(heads_base), std::move(heads_swa), std::move(ubatches)); this, std::move(heads_base), std::move(heads_swa), std::move(ubatches));
} while (false); } while (false);
// TODO: if we fail again, we should attempt different splitting strategies // TODO: if we fail again, we should attempt different splitting strategies
@ -197,37 +203,31 @@ llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_swa() const {
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(llama_memory_status status) : status(status) {} llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(llama_memory_status status) : status(status) {}
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS) { llama_kv_cache_unified_iswa * kv) :
state_base = kv->get_base()->init_full(); state_base(kv->get_base()->init_full()),
state_swa = kv->get_swa ()->init_full(); state_swa (kv->get_swa ()->init_full()),
status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status());
} }
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv, llama_kv_cache_unified_iswa * kv,
llama_context * lctx, llama_context * lctx,
bool optimize) : status(LLAMA_MEMORY_STATUS_SUCCESS) { bool optimize) :
state_base = kv->get_base()->init_update(lctx, optimize); state_base(kv->get_base()->init_update(lctx, optimize)),
state_swa = kv->get_swa ()->init_update(lctx, optimize); state_swa (kv->get_swa ()->init_update(lctx, optimize)),
status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status());
} }
llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa_state::llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv, llama_kv_cache_unified_iswa * kv,
llama_sbatch sbatch,
std::vector<uint32_t> heads_base, std::vector<uint32_t> heads_base,
std::vector<uint32_t> heads_swa, std::vector<uint32_t> heads_swa,
std::vector<llama_ubatch> ubatches) std::vector<llama_ubatch> ubatches) :
: status(LLAMA_MEMORY_STATUS_SUCCESS), ubatches(std::move(ubatches)),
sbatch(std::move(sbatch)),
ubatches(std::move(ubatches)) {
// note: here we copy the ubatches. not sure if this is ideal // note: here we copy the ubatches. not sure if this is ideal
state_base.reset(new llama_kv_cache_unified_state(kv->get_base(), {}, std::move(heads_base), this->ubatches)); state_base(new llama_kv_cache_unified_state(kv->get_base(), std::move(heads_base), this->ubatches)),
state_swa .reset(new llama_kv_cache_unified_state(kv->get_swa (), {}, std::move(heads_swa), this->ubatches)); state_swa (new llama_kv_cache_unified_state(kv->get_swa (), std::move(heads_swa), this->ubatches)),
status(llama_memory_status_combine(state_base->get_status(), state_swa->get_status())) {
status = llama_memory_status_combine(state_base->get_status(), state_swa->get_status());
} }
llama_kv_cache_unified_iswa_state:: ~llama_kv_cache_unified_iswa_state() = default; llama_kv_cache_unified_iswa_state:: ~llama_kv_cache_unified_iswa_state() = default;
@ -256,12 +256,6 @@ bool llama_kv_cache_unified_iswa_state::apply() {
return res; return res;
} }
std::vector<int64_t> & llama_kv_cache_unified_iswa_state::out_ids() {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
return sbatch.out_ids;
}
llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const { llama_memory_status llama_kv_cache_unified_iswa_state::get_status() const {
return status; return status;
} }


@ -32,7 +32,7 @@ public:
// //
llama_memory_state_ptr init_batch( llama_memory_state_ptr init_batch(
const llama_batch & batch, llama_batch_allocr & balloc,
uint32_t n_ubatch, uint32_t n_ubatch,
bool embd_all) override; bool embd_all) override;
@ -90,7 +90,6 @@ public:
// used to create a state from a batch // used to create a state from a batch
llama_kv_cache_unified_iswa_state( llama_kv_cache_unified_iswa_state(
llama_kv_cache_unified_iswa * kv, llama_kv_cache_unified_iswa * kv,
llama_sbatch sbatch,
std::vector<uint32_t> heads_base, std::vector<uint32_t> heads_base,
std::vector<uint32_t> heads_swa, std::vector<uint32_t> heads_swa,
std::vector<llama_ubatch> ubatches); std::vector<llama_ubatch> ubatches);
@ -104,8 +103,6 @@ public:
bool next() override; bool next() override;
bool apply() override; bool apply() override;
std::vector<int64_t> & out_ids() override;
llama_memory_status get_status() const override; llama_memory_status get_status() const override;
const llama_ubatch & get_ubatch() const override; const llama_ubatch & get_ubatch() const override;
@ -117,17 +114,15 @@ public:
const llama_kv_cache_unified_state * get_swa() const; const llama_kv_cache_unified_state * get_swa() const;
private: private:
llama_memory_status status;
//llama_kv_cache_unified_iswa * kv; //llama_kv_cache_unified_iswa * kv;
llama_sbatch sbatch;
// the index of the next ubatch to process // the index of the next ubatch to process
size_t i_next = 0; size_t i_next = 0;
std::vector<llama_ubatch> ubatches; std::vector<llama_ubatch> ubatches;
llama_memory_state_ptr state_base; const llama_memory_state_ptr state_base;
llama_memory_state_ptr state_swa; const llama_memory_state_ptr state_swa;
const llama_memory_status status;
}; };


@ -68,8 +68,8 @@ llama_kv_cache_unified::llama_kv_cache_unified(
continue; continue;
} }
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
const char * dev_name = "CPU"; const char * dev_name = "CPU";
@ -308,17 +308,23 @@ llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const {
} }
llama_memory_state_ptr llama_kv_cache_unified::init_batch( llama_memory_state_ptr llama_kv_cache_unified::init_batch(
const llama_batch & batch, llama_batch_allocr & balloc,
uint32_t n_ubatch, uint32_t n_ubatch,
bool embd_all) { bool embd_all) {
GGML_UNUSED(embd_all); GGML_UNUSED(embd_all);
do { do {
auto sbatch = llama_sbatch(batch, hparams.n_embd, true); balloc.split_reset();
std::vector<llama_ubatch> ubatches; std::vector<llama_ubatch> ubatches;
while (sbatch.n_tokens > 0) { while (true) {
ubatches.push_back(sbatch.split_simple(n_ubatch)); auto ubatch = balloc.split_simple(n_ubatch);
if (ubatch.n_tokens == 0) {
break;
}
ubatches.push_back(std::move(ubatch)); // NOLINT
} }
auto heads = prepare(ubatches); auto heads = prepare(ubatches);
@ -327,7 +333,7 @@ llama_memory_state_ptr llama_kv_cache_unified::init_batch(
} }
return std::make_unique<llama_kv_cache_unified_state>( return std::make_unique<llama_kv_cache_unified_state>(
this, std::move(sbatch), std::move(heads), std::move(ubatches)); this, std::move(heads), std::move(ubatches));
} while (false); } while (false);
return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE); return std::make_unique<llama_kv_cache_unified_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
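All init_batch() implementations now follow the same idiom: split_reset() the allocator, then call split_simple() (or split_equal()) until it returns an empty ubatch, collecting the pieces before prepare(). A toy allocator that mimics just this control flow; it hands out plain token chunks instead of real llama_ubatch objects and is not the actual llama_batch_allocr interface:

#include <algorithm>
#include <cstdio>
#include <vector>

// minimal stand-in: hand out fixed-size chunks until the batch is exhausted,
// then return an empty chunk so the caller breaks out of the loop
struct toy_balloc {
    std::vector<int> tokens;
    size_t cur = 0;

    void split_reset() { cur = 0; }

    std::vector<int> split_simple(size_t n_ubatch) {
        const size_t n = std::min(n_ubatch, tokens.size() - cur);
        std::vector<int> ub(tokens.begin() + cur, tokens.begin() + cur + n);
        cur += n;
        return ub;
    }
};

int main() {
    toy_balloc balloc;
    balloc.tokens = { 1, 2, 3, 4, 5, 6, 7 };

    const size_t n_ubatch = 3;

    balloc.split_reset();

    std::vector<std::vector<int>> ubatches;
    while (true) {
        auto ubatch = balloc.split_simple(n_ubatch);
        if (ubatch.empty()) {
            break;
        }
        ubatches.push_back(std::move(ubatch));
    }

    printf("split %zu tokens into %zu ubatches\n", balloc.tokens.size(), ubatches.size());
    return 0;
}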
@ -644,12 +650,6 @@ int32_t llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch) const {
} }
void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) { void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch & ubatch) {
if (debug > 0) {
LLAMA_LOG_DEBUG("%s: ubatch info:\n", __func__);
LLAMA_LOG_DEBUG("%s: n_tokens = %d, equal_seqs = %d\n", __func__, ubatch.n_tokens, ubatch.equal_seqs);
LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d, n_seqs = %d\n", __func__, ubatch.n_seq_tokens, ubatch.n_seqs);
}
// keep track of the max sequence position that we would overwrite with this ubatch // keep track of the max sequence position that we would overwrite with this ubatch
// for non-SWA cache, this would be always empty // for non-SWA cache, this would be always empty
llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ]; llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ];
@ -657,27 +657,22 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch
seq_pos_max_rm[s] = -1; seq_pos_max_rm[s] = -1;
} }
for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
for (uint32_t j = 0; j < ubatch.n_seq_tokens; ++j) { if (!cells.is_empty(head_cur + i)) {
const uint32_t idx = s*ubatch.n_seq_tokens + j; assert(cells.seq_count(head_cur + i) == 1);
if (!cells.is_empty(head_cur + idx)) { const llama_seq_id seq_id = cells.seq_get(head_cur + i);
assert(cells.seq_count(head_cur + idx) == 1); const llama_pos pos = cells.pos_get(head_cur + i);
const llama_seq_id seq_id = cells.seq_get(head_cur + idx);
const llama_pos pos = cells.pos_get(head_cur + idx);
seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);
cells.rm(head_cur + idx); cells.rm(head_cur + i);
} }
cells.pos_set(head_cur + idx, ubatch.pos[idx]); cells.pos_set(head_cur + i, ubatch.pos[i]);
// TODO: fix indexing [UBATCH_IDX] for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) {
for (int32_t i = 0; i < ubatch.n_seq_id[s]; i++) { cells.seq_add(head_cur + i, ubatch.seq_id[i][s]);
cells.seq_add(head_cur + idx, ubatch.seq_id[s][i]);
}
} }
} }
@ -696,6 +691,7 @@ void llama_kv_cache_unified::apply_ubatch(uint32_t head_cur, const llama_ubatch
seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1); seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1);
} }
} }
// move the head at the end of the slot // move the head at the end of the slot
head = head_cur + ubatch.n_tokens; head = head_cur + ubatch.n_tokens;
} }
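With the equal_seqs layout gone from this path, apply_ubatch() walks tokens directly: for each token it evicts whatever sequence occupied the target cell (remembering the max removed position so that sequence can be trimmed afterwards), stores the new position and sequence ids, and finally advances head past the slot. A simplified, self-contained model of that bookkeeping; it skips the per-sequence trim and SWA handling, and the cell layout is only a sketch:

#include <cstdio>
#include <set>
#include <vector>

// toy model of the unified KV cells: each cell holds a position and a set of sequence ids
struct toy_cell {
    int pos = -1;
    std::set<int> seq;
    bool empty() const { return seq.empty(); }
};

int main() {
    std::vector<toy_cell> cells(8);
    int head = 0;

    // ubatch: one position and one sequence id per token (hypothetical values)
    const std::vector<int> ub_pos    = { 5, 6, 7 };
    const std::vector<int> ub_seq_id = { 0, 0, 0 };
    const int n_tokens = (int) ub_pos.size();

    const int head_cur = head; // slot found by find_slot() in the real code

    for (int i = 0; i < n_tokens; ++i) {
        toy_cell & c = cells[head_cur + i];
        if (!c.empty()) {
            // the real code records the max overwritten position per sequence here
            c = toy_cell();
        }
        c.pos = ub_pos[i];
        c.seq.insert(ub_seq_id[i]);
    }

    // move the head to the end of the slot, like apply_ubatch() does
    head = head_cur + n_tokens;

    printf("head = %d\n", head);
    for (int i = 0; i < head; ++i) {
        printf("cell %d: pos %d, %zu seq(s)\n", i, cells[i].pos, cells[i].seq.size());
    }
    return 0;
}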
@ -793,8 +789,6 @@ ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_
void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
const uint32_t n_tokens = ubatch->n_tokens; const uint32_t n_tokens = ubatch->n_tokens;
const uint32_t n_seq_tokens = ubatch->n_seq_tokens;
const uint32_t n_seqs = ubatch->n_seqs;
GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
float * data = (float *) dst->data; float * data = (float *) dst->data;
@ -814,26 +808,23 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub
// xxxxx----- // xxxxx-----
// To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615 // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
for (uint32_t h = 0; h < 1; ++h) { for (uint32_t h = 0; h < 1; ++h) {
for (uint32_t s = 0; s < n_seqs; ++s) { for (uint32_t i = 0; i < n_tokens; ++i) {
const llama_seq_id seq_id = ubatch->seq_id[s][0]; const llama_seq_id seq_id = ubatch->seq_id[i][0];
for (uint32_t j = 0; j < n_seq_tokens; ++j) { const llama_pos p1 = ubatch->pos[i];
const uint32_t idx = s*n_seq_tokens + j;
const llama_pos p1 = ubatch->pos[idx]; for (uint32_t j = 0; j < n_kv; ++j) {
for (uint32_t i = 0; i < n_kv; ++i) {
float f = 0.0f; float f = 0.0f;
bool masked = false; bool masked = false;
if (cells.is_empty(i)) { if (cells.is_empty(j)) {
masked = true; masked = true;
} else { } else {
const llama_pos p0 = cells.pos_get(i); const llama_pos p0 = cells.pos_get(j);
// mask the token if not the same sequence // mask the token if not the same sequence
masked = masked || (!cells.seq_has(i, seq_id)); masked = masked || (!cells.seq_has(j, seq_id));
// mask future tokens // mask future tokens
masked = masked || (causal_attn && p0 > p1); masked = masked || (causal_attn && p0 > p1);
@ -850,16 +841,15 @@ void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ub
f = -INFINITY; f = -INFINITY;
} }
data[h*(n_kv*n_tokens) + idx*n_kv + i] = f; data[h*(n_kv*n_tokens) + i*n_kv + j] = f;
}
} }
} }
// mask padded tokens // mask padded tokens
if (data) { if (data) {
for (uint32_t j = n_tokens; j < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++j) { for (uint32_t i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
for (uint32_t i = 0; i < n_kv; ++i) { for (uint32_t j = 0; j < n_kv; ++j) {
data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY; data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
} }
} }
} }
@ -887,12 +877,12 @@ void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama
const int32_t n_kv = dst->ne[0]; const int32_t n_kv = dst->ne[0];
for (int h = 0; h < 1; ++h) { for (int h = 0; h < 1; ++h) {
for (int j = 0; j < n_tokens; ++j) { for (int i = 0; i < n_tokens; ++i) {
for (int i = 0; i < n_kv; ++i) { for (int j = 0; j < n_kv; ++j) {
// the position when the cell is empty is irrelevant - it will be masked out later in the attention // the position when the cell is empty is irrelevant - it will be masked out later in the attention
const llama_pos p0 = cells.is_empty(i) ? -1 : cells.pos_get(i); const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j);
data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(p0, ubatch->pos[j], hparams.n_rel_attn_bkts, false); data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false);
} }
} }
} }
@ -1430,7 +1420,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
for (const auto & layer : layers) { for (const auto & layer : layers) {
const uint32_t il = layer.il; const uint32_t il = layer.il;
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
// Write key type // Write key type
const int32_t k_type_i = (int32_t)layer.k->type; const int32_t k_type_i = (int32_t)layer.k->type;
@ -1452,7 +1442,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
for (const auto & layer : layers) { for (const auto & layer : layers) {
const uint32_t il = layer.il; const uint32_t il = layer.il;
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Write value type // Write value type
const int32_t v_type_i = (int32_t)layer.v->type; const int32_t v_type_i = (int32_t)layer.v->type;
@ -1476,7 +1466,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
for (const auto & layer : layers) { for (const auto & layer : layers) {
const uint32_t il = layer.il; const uint32_t il = layer.il;
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Write value type // Write value type
const int32_t v_type_i = (int32_t)layer.v->type; const int32_t v_type_i = (int32_t)layer.v->type;
@ -1509,12 +1499,9 @@ bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell
seq_rm(dest_seq_id, -1, -1); seq_rm(dest_seq_id, -1, -1);
llama_sbatch sbatch; llama_batch_allocr balloc(hparams.n_pos_per_embd());
llama_ubatch ubatch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
ubatch.n_tokens = cell_count; llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
ubatch.n_seq_tokens = cell_count;
ubatch.n_seqs = 1;
for (uint32_t i = 0; i < cell_count; ++i) { for (uint32_t i = 0; i < cell_count; ++i) {
llama_pos pos; llama_pos pos;
@ -1621,7 +1608,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
for (const auto & layer : layers) { for (const auto & layer : layers) {
const uint32_t il = layer.il; const uint32_t il = layer.il;
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
// Read type of key // Read type of key
int32_t k_type_i_ref; int32_t k_type_i_ref;
@ -1651,7 +1638,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
for (const auto & layer : layers) { for (const auto & layer : layers) {
const uint32_t il = layer.il; const uint32_t il = layer.il;
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Read type of value // Read type of value
int32_t v_type_i_ref; int32_t v_type_i_ref;
@ -1681,7 +1668,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
for (const auto & layer : layers) { for (const auto & layer : layers) {
const uint32_t il = layer.il; const uint32_t il = layer.il;
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
// Read type of value // Read type of value
int32_t v_type_i_ref; int32_t v_type_i_ref;
@ -1746,9 +1733,8 @@ llama_kv_cache_unified_state::llama_kv_cache_unified_state(
llama_kv_cache_unified_state::llama_kv_cache_unified_state( llama_kv_cache_unified_state::llama_kv_cache_unified_state(
llama_kv_cache_unified * kv, llama_kv_cache_unified * kv,
llama_sbatch sbatch,
llama_kv_cache_unified::ubatch_heads heads, llama_kv_cache_unified::ubatch_heads heads,
std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sbatch(std::move(sbatch)), heads(std::move(heads)), ubatches(std::move(ubatches)) { std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), heads(std::move(heads)), ubatches(std::move(ubatches)) {
} }
llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default; llama_kv_cache_unified_state::~llama_kv_cache_unified_state() = default;
@ -1781,12 +1767,6 @@ bool llama_kv_cache_unified_state::apply() {
return true; return true;
} }
std::vector<int64_t> & llama_kv_cache_unified_state::out_ids() {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
return sbatch.out_ids;
}
llama_memory_status llama_kv_cache_unified_state::get_status() const { llama_memory_status llama_kv_cache_unified_state::get_status() const {
return status; return status;
} }


@@ -57,7 +57,7 @@ public:
//
llama_memory_state_ptr init_batch(
- const llama_batch & batch,
+ llama_batch_allocr & balloc,
uint32_t n_ubatch,
bool embd_all) override;
@@ -231,7 +231,6 @@ public:
// used to create a decode state from a batch
llama_kv_cache_unified_state(
llama_kv_cache_unified * kv,
- llama_sbatch sbatch,
ubatch_heads heads,
std::vector<llama_ubatch> ubatches);
@@ -244,8 +243,6 @@ public:
bool next() override;
bool apply() override;
- std::vector<int64_t> & out_ids() override;
llama_memory_status get_status() const override;
const llama_ubatch & get_ubatch() const override;
@@ -286,8 +283,6 @@ private:
// batch processing state
//
- llama_sbatch sbatch;
// the index of the next ubatch to process
size_t i_next = 0;


@@ -384,10 +384,10 @@ private:
//
std::vector<llama_pos> shift;
- using bits_t = std::bitset<LLAMA_MAX_SEQ>;
+ using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;
// the bitset seq[i] tells us which sequences are currently occupying the i-th cell
- std::vector<bits_t> seq;
+ std::vector<seq_set_t> seq;
// the set seq_pos[s] tells us which positions are currently present for sequence s
// this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache
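As an aside, a minimal self-contained sketch of the bookkeeping the comments above describe, with toy types in place of the real cell store (names and sizes here are illustrative, not taken from llama.cpp):

// Hypothetical sketch: which sequences occupy a cell, and which positions a sequence holds.
#include <bitset>
#include <cstddef>
#include <set>
#include <vector>

constexpr std::size_t MAX_SEQ_SKETCH = 64; // stand-in for LLAMA_MAX_SEQ

struct cell_tracker {
    using seq_set_t = std::bitset<MAX_SEQ_SKETCH>;

    std::vector<seq_set_t>     seq;     // seq[i]    : sequences currently occupying cell i
    std::vector<std::set<int>> seq_pos; // seq_pos[s]: positions currently present for sequence s

    explicit cell_tracker(std::size_t n_cells) : seq(n_cells), seq_pos(MAX_SEQ_SKETCH) {}

    void add(std::size_t i, std::size_t s, int pos) {
        seq[i].set(s);          // mark sequence s as an occupant of cell i
        seq_pos[s].insert(pos); // record the position for sequence s
    }

    // min/max follow directly from the ordered set, as the comment above notes
    int pos_min(std::size_t s) const { return seq_pos[s].empty() ? -1 : *seq_pos[s].begin();  }
    int pos_max(std::size_t s) const { return seq_pos[s].empty() ? -1 : *seq_pos[s].rbegin(); }
};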


@@ -0,0 +1,246 @@
#include "llama-memory-hybrid.h"
#include "llama-impl.h"
#include "llama-model.h"
#include "llama-context.h"
//
// llama_memory_hybrid
//
llama_memory_hybrid::llama_memory_hybrid(
const llama_model & model,
/* attn */
ggml_type type_k,
ggml_type type_v,
bool v_trans,
uint32_t kv_size,
uint32_t n_pad,
uint32_t n_swa,
llama_swa_type swa_type,
/* recurrent */
ggml_type type_r,
ggml_type type_s,
uint32_t rs_size,
/* common */
uint32_t n_seq_max,
bool offload,
/* layer filters */
layer_filter_cb && filter_attn,
layer_filter_cb && filter_recr) :
hparams(model.hparams),
mem_attn(new llama_kv_cache_unified(
model,
filter_attn == nullptr ?
[&](int32_t il) { return !hparams.is_recurrent(il); }
: filter_attn,
type_k,
type_v,
v_trans,
offload,
kv_size,
n_seq_max,
n_pad,
n_swa,
swa_type
)),
mem_recr(new llama_memory_recurrent(
model,
filter_recr == nullptr ?
[&](int32_t il) { return hparams.is_recurrent(il); }
: filter_recr,
type_r,
type_s,
offload,
rs_size,
n_seq_max
)) {}
llama_memory_state_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
do {
balloc.split_reset();
// follow the recurrent pattern for creating the ubatch splits
std::vector<llama_ubatch> ubatches;
while (true) {
llama_ubatch ubatch;
if (embd_all) {
// if all tokens are output, split by sequence
ubatch = balloc.split_seq(n_ubatch);
} else {
ubatch = balloc.split_equal(n_ubatch);
}
if (ubatch.n_tokens == 0) {
break;
}
ubatches.push_back(std::move(ubatch)); // NOLINT
}
// prepare the recurrent batches first
if (!mem_recr->prepare(ubatches)) {
// TODO: will the recurrent cache be in an undefined state at this point?
LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__);
return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
// prepare the attention cache
auto heads_attn = mem_attn->prepare(ubatches);
if (heads_attn.empty()) {
LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__);
return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
return std::make_unique<llama_memory_hybrid_state>(
this, std::move(heads_attn), std::move(ubatches));
} while(false);
return std::make_unique<llama_memory_hybrid_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
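The loop above is the generic drain pattern for a batch allocator: keep asking for splits until an empty ubatch comes back. A standalone sketch of the same control flow, with a toy splitter standing in for llama_batch_allocr:

#include <cstdint>
#include <vector>

struct toy_ubatch { uint32_t n_tokens = 0; };

struct toy_splitter {
    uint32_t remaining = 0;
    // hands out at most n tokens per call, 0 once the batch is exhausted
    toy_ubatch split_equal(uint32_t n) {
        const uint32_t take = remaining < n ? remaining : n;
        remaining -= take;
        return { take };
    }
};

static std::vector<toy_ubatch> drain(toy_splitter & s, uint32_t n_ubatch) {
    std::vector<toy_ubatch> ubatches;
    while (true) {
        toy_ubatch ub = s.split_equal(n_ubatch); // the real code uses split_seq() when all tokens are output
        if (ub.n_tokens == 0) {
            break; // an empty ubatch signals the allocator is drained
        }
        ubatches.push_back(ub);
    }
    return ubatches;
}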
llama_memory_state_ptr llama_memory_hybrid::init_full() {
return std::make_unique<llama_memory_hybrid_state>(this);
}
llama_memory_state_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) {
return std::make_unique<llama_memory_hybrid_state>(this, lctx, optimize);
}
bool llama_memory_hybrid::get_can_shift() const {
// Shifting is trivially supported for recurrent
return mem_attn->get_can_shift();
}
void llama_memory_hybrid::clear(bool data) {
mem_attn->clear(data);
mem_recr->clear(data);
}
bool llama_memory_hybrid::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
// Try removing from the recurrent cache first since it may fail. If it does
// fail, the cache will not have been mutated.
if (!mem_recr->seq_rm(seq_id, p0, p1)) {
return false;
}
return mem_attn->seq_rm(seq_id, p0, p1);
}
void llama_memory_hybrid::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
mem_attn->seq_cp(seq_id_src, seq_id_dst, p0, p1);
mem_recr->seq_cp(seq_id_src, seq_id_dst, p0, p1);
}
void llama_memory_hybrid::seq_keep(llama_seq_id seq_id) {
mem_attn->seq_keep(seq_id);
mem_recr->seq_keep(seq_id);
}
void llama_memory_hybrid::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
mem_attn->seq_add(seq_id, p0, p1, shift);
mem_recr->seq_add(seq_id, p0, p1, shift);
}
void llama_memory_hybrid::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
mem_attn->seq_div(seq_id, p0, p1, d);
mem_recr->seq_div(seq_id, p0, p1, d);
}
llama_pos llama_memory_hybrid::seq_pos_min(llama_seq_id seq_id) const {
// the min of the total cache is the max of the two caches' min values
return std::max(mem_attn->seq_pos_min(seq_id), mem_recr->seq_pos_min(seq_id));
}
llama_pos llama_memory_hybrid::seq_pos_max(llama_seq_id seq_id) const {
// the max of the total cache is the min of the two caches' max values
return std::min(mem_attn->seq_pos_max(seq_id), mem_recr->seq_pos_max(seq_id));
}
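A worked example of the min/max combination above, with made-up numbers: if the attention cache holds positions 0..100 of a sequence while the recurrent cache only retains state from position 80 onwards, the hybrid cache can only serve the overlap.

#include <algorithm>
#include <cstdio>

int main() {
    // per-cache position ranges for one sequence (hypothetical values)
    const int attn_min = 0,  attn_max = 100; // attention KV cache
    const int recr_min = 80, recr_max = 100; // recurrent state cache

    // only positions present in *both* caches are usable, i.e. the intersection
    const int min_pos = std::max(attn_min, recr_min); // 80
    const int max_pos = std::min(attn_max, recr_max); // 100

    std::printf("combined usable range: [%d, %d]\n", min_pos, max_pos);
    return 0;
}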
void llama_memory_hybrid::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
mem_attn->state_write(io, seq_id);
mem_recr->state_write(io, seq_id);
}
void llama_memory_hybrid::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
mem_attn->state_read(io, seq_id);
mem_recr->state_read(io, seq_id);
}
llama_kv_cache_unified * llama_memory_hybrid::get_mem_attn() const {
return mem_attn.get();
}
llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const {
return mem_recr.get();
}
llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_status status) : status(status) {}
llama_memory_hybrid_state::llama_memory_hybrid_state(llama_memory_hybrid * mem) :
state_attn(mem->get_mem_attn()->init_full()),
state_recr(mem->get_mem_recr()->init_full()),
status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
}
llama_memory_hybrid_state::llama_memory_hybrid_state(
llama_memory_hybrid * mem,
llama_context * lctx,
bool optimize) :
state_attn(mem->get_mem_attn()->init_update(lctx, optimize)),
state_recr(mem->get_mem_recr()->init_update(lctx, optimize)),
status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
}
llama_memory_hybrid_state::llama_memory_hybrid_state(
llama_memory_hybrid * mem,
std::vector<uint32_t> heads_attn,
std::vector<llama_ubatch> ubatches) :
ubatches(std::move(ubatches)),
// note: here we copy the ubatches. not sure if this is ideal
state_attn(new llama_kv_cache_unified_state(mem->get_mem_attn(), std::move(heads_attn), this->ubatches)),
state_recr(new llama_memory_recurrent_state(mem->get_mem_recr(), this->ubatches)),
status(llama_memory_status_combine(state_attn->get_status(), state_recr->get_status())) {
}
bool llama_memory_hybrid_state::next() {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
state_attn->next();
state_recr->next();
if (++i_next >= ubatches.size()) {
return false;
}
return true;
}
bool llama_memory_hybrid_state::apply() {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
bool res = true;
res = res & state_attn->apply();
res = res & state_recr->apply();
return res;
}
llama_memory_status llama_memory_hybrid_state::get_status() const {
return status;
}
const llama_ubatch & llama_memory_hybrid_state::get_ubatch() const {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
return ubatches[i_next];
}
const llama_kv_cache_unified_state * llama_memory_hybrid_state::get_state_attn() const {
return static_cast<const llama_kv_cache_unified_state *>(state_attn.get());
}
const llama_memory_recurrent_state * llama_memory_hybrid_state::get_state_recr() const {
return static_cast<const llama_memory_recurrent_state *>(state_recr.get());
}
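For orientation, a hedged sketch of how a caller might walk a memory state produced by init_batch (loop shape only; the state type is templated so the sketch stays self-contained, and "process" is a hypothetical callback standing in for graph build and compute):

template <typename state_t, typename process_fn>
bool run_all_ubatches(state_t & state, process_fn && process) {
    while (true) {
        if (!state.apply()) {               // place the current ubatch into the cache(s)
            return false;
        }
        if (!process(state.get_ubatch())) { // hypothetical: build the graph and compute
            return false;
        }
        if (!state.next()) {                // advance; returns false after the last ubatch
            break;
        }
    }
    return true;
}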


@@ -0,0 +1,138 @@
#pragma once
#include "llama-batch.h"
#include "llama-graph.h"
#include "llama-kv-cache-unified.h"
#include "llama-memory.h"
#include "llama-memory-recurrent.h"
#include <memory>
#include <vector>
//
// llama_memory_hybrid
//
// utilizes instances of llama_memory_recurrent and llama_kv_cache_unified to
// support models where each layer may be either attention-based or recurrent
class llama_memory_hybrid : public llama_memory_i {
public:
// this callback is used to filter out layers that should not be included in the cache
using layer_filter_cb = std::function<bool(int32_t il)>;
llama_memory_hybrid(
const llama_model & model,
/* attn */
ggml_type type_k,
ggml_type type_v,
bool v_trans,
uint32_t kv_size,
uint32_t n_pad,
uint32_t n_swa,
llama_swa_type swa_type,
/* recurrent */
ggml_type type_r,
ggml_type type_s,
uint32_t rs_size,
/* common */
uint32_t n_seq_max,
bool offload,
/* layer filters */
layer_filter_cb && filter_attn = nullptr,
layer_filter_cb && filter_recr = nullptr);
~llama_memory_hybrid() = default;
//
// llama_memory_i
//
llama_memory_state_ptr init_batch(
llama_batch_allocr & balloc,
uint32_t n_ubatch,
bool embd_all) override;
llama_memory_state_ptr init_full() override;
llama_memory_state_ptr init_update(llama_context * lctx, bool optimize) override;
bool get_can_shift() const override;
void clear(bool data) override;
bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
void seq_keep(llama_seq_id seq_id) override;
void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
llama_pos seq_pos_min(llama_seq_id seq_id) const override;
llama_pos seq_pos_max(llama_seq_id seq_id) const override;
// state write/load
void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
//
// llama_memory_hybrid specific API
//
llama_kv_cache_unified * get_mem_attn() const;
llama_memory_recurrent * get_mem_recr() const;
private:
const llama_hparams & hparams;
const std::unique_ptr<llama_kv_cache_unified> mem_attn;
const std::unique_ptr<llama_memory_recurrent> mem_recr;
};
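A small self-contained sketch of the default layer routing the constructor installs when no filters are passed: recurrent layers go to the recurrent cache, everything else to the unified KV cache (the 4-layer flag vector below is made up):

#include <cstdint>
#include <cstdio>
#include <vector>

struct toy_hparams {
    std::vector<bool> recurrent_layer;                                   // analogous to hparams.is_recurrent(il)
    bool is_recurrent(int32_t il) const { return recurrent_layer[il]; }
};

int main() {
    toy_hparams hparams = { { false, true, false, true } }; // hypothetical 4-layer hybrid model

    auto filter_attn = [&](int32_t il) { return !hparams.is_recurrent(il); };
    auto filter_recr = [&](int32_t il) { return  hparams.is_recurrent(il); };

    for (int32_t il = 0; il < 4; ++il) {
        // each layer is claimed by exactly one of the two sub-caches
        const char * dst = filter_attn(il) ? "attention KV cache"
                         : filter_recr(il) ? "recurrent cache"
                                           : "none";
        std::printf("layer %d -> %s\n", il, dst);
    }
    return 0;
}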
class llama_memory_hybrid_state : public llama_memory_state_i {
public:
// init failure
explicit llama_memory_hybrid_state(llama_memory_status status);
// init full
explicit llama_memory_hybrid_state(llama_memory_hybrid * mem);
// init update
explicit llama_memory_hybrid_state(
llama_memory_hybrid * mem,
llama_context * lctx,
bool optimize);
// init success
llama_memory_hybrid_state(
llama_memory_hybrid * mem,
std::vector<uint32_t> heads_attn,
std::vector<llama_ubatch> ubatches);
~llama_memory_hybrid_state() = default;
bool next() override;
bool apply() override;
llama_memory_status get_status() const override;
const llama_ubatch & get_ubatch() const override;
//
// llama_memory_hybrid_state
//
const llama_kv_cache_unified_state * get_state_attn() const;
const llama_memory_recurrent_state * get_state_recr() const;
private:
// the index of the next ubatch to process
size_t i_next = 0;
std::vector<llama_ubatch> ubatches;
const llama_memory_state_ptr state_attn;
const llama_memory_state_ptr state_recr;
const llama_memory_status status;
};
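Consumers of the hybrid state are expected to pick the matching sub-state per layer; a hedged sketch of that dispatch (templated so it stays self-contained, with the layer routing decision assumed to come from the model hparams):

// In the real code the two branches would yield llama_kv_cache_unified_state /
// llama_memory_recurrent_state pointers obtained via get_state_attn() / get_state_recr().
template <typename hybrid_state_t>
const void * pick_layer_state(const hybrid_state_t & st, bool layer_is_recurrent) {
    return layer_is_recurrent
        ? static_cast<const void *>(st.get_state_recr())  // R/S tensors for recurrent layers
        : static_cast<const void *>(st.get_state_attn()); // K/V tensors for attention layers
}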


@@ -1,4 +1,4 @@
- #include "llama-kv-cache-recurrent.h"
+ #include "llama-memory-recurrent.h"
#include "llama-impl.h"
#include "llama-io.h"
@@ -12,27 +12,28 @@
#include <stdexcept>
//
- // llama_kv_cache_recurrent
+ // llama_memory_recurrent
//
- llama_kv_cache_recurrent::llama_kv_cache_recurrent(
+ llama_memory_recurrent::llama_memory_recurrent(
const llama_model & model,
- ggml_type type_k,
- ggml_type type_v,
+ layer_filter_cb && filter,
+ ggml_type type_r,
+ ggml_type type_s,
bool offload,
- uint32_t kv_size,
+ uint32_t mem_size,
uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
const int32_t n_layer = hparams.n_layer;
- LLAMA_LOG_INFO("%s: kv_size = %u, n_seq_max = %u, type_k = '%s', type_v = '%s', n_layer = %d\n",
- __func__, kv_size, n_seq_max, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);
+ LLAMA_LOG_INFO("%s: mem_size = %u, n_seq_max = %u, type_r = '%s', type_s = '%s', n_layer = %d\n",
+ __func__, mem_size, n_seq_max, ggml_type_name(type_r), ggml_type_name(type_s), n_layer);
head = 0;
- size = kv_size;
+ size = mem_size;
used = 0;
cells.clear();
- cells.resize(kv_size);
+ cells.resize(mem_size);
// create a context for each buffer type
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
@@ -59,12 +60,14 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
return it->second;
};
- k_l.reserve(n_layer);
- v_l.reserve(n_layer);
+ r_l.resize(n_layer);
+ s_l.resize(n_layer);
for (int i = 0; i < n_layer; i++) {
- const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
- const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
+ if (filter && !filter(i)) {
+ LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i);
+ continue;
+ }
const char * dev_name = "CPU";
@@ -84,12 +87,12 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
throw std::runtime_error("failed to create ggml context for kv cache");
}
- ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
- ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
- ggml_format_name(k, "cache_k_l%d", i);
- ggml_format_name(v, "cache_v_l%d", i);
- k_l.push_back(k);
- v_l.push_back(v);
+ ggml_tensor * r = ggml_new_tensor_1d(ctx, type_r, hparams.n_embd_r()*mem_size);
+ ggml_tensor * s = ggml_new_tensor_1d(ctx, type_s, hparams.n_embd_s()*mem_size);
+ ggml_format_name(r, "cache_r_l%d", i);
+ ggml_format_name(s, "cache_s_l%d", i);
+ r_l[i] = r;
+ s_l[i] = s;
}
// allocate tensors and initialize the buffers to avoid NaNs in the padding
@@ -107,17 +110,17 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
}
{
- const size_t memory_size_k = size_k_bytes();
- const size_t memory_size_v = size_v_bytes();
+ const size_t memory_size_r = size_r_bytes();
+ const size_t memory_size_s = size_s_bytes();
- LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
- (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
- ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
- ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
+ LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__,
+ (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f),
+ ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f),
+ ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f));
}
}
void llama_kv_cache_recurrent::clear(bool data) { void llama_memory_recurrent::clear(bool data) {
for (int32_t i = 0; i < (int32_t) size; ++i) { for (int32_t i = 0; i < (int32_t) size; ++i) {
cells[i].pos = -1; cells[i].pos = -1;
cells[i].seq_id.clear(); cells[i].seq_id.clear();
@ -135,7 +138,7 @@ void llama_kv_cache_recurrent::clear(bool data) {
} }
} }
bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
uint32_t new_head = size; uint32_t new_head = size;
if (p0 < 0) { if (p0 < 0) {
@ -154,7 +157,7 @@ bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_p
if (0 <= seq_id) { if (0 <= seq_id) {
int32_t & tail_id = cells[seq_id].tail; int32_t & tail_id = cells[seq_id].tail;
if (tail_id >= 0) { if (tail_id >= 0) {
const kv_cell & cell = cells[tail_id]; const auto & cell = cells[tail_id];
// partial intersection is invalid // partial intersection is invalid
if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
return false; return false;
@ -202,7 +205,7 @@ bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_p
return true; return true;
} }
void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
if (seq_id_src == seq_id_dst) { if (seq_id_src == seq_id_dst) {
return; return;
} }
@ -216,11 +219,11 @@ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_
} }
if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) { if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
kv_cell & tail_src = cells[seq_id_src]; auto & tail_src = cells[seq_id_src];
kv_cell & tail_dst = cells[seq_id_dst]; auto & tail_dst = cells[seq_id_dst];
if (tail_dst.tail >= 0) { if (tail_dst.tail >= 0) {
// clear destination seq_id if it wasn't empty // clear destination seq_id if it wasn't empty
kv_cell & cell_dst = cells[tail_dst.tail]; auto & cell_dst = cells[tail_dst.tail];
cell_dst.seq_id.erase(seq_id_dst); cell_dst.seq_id.erase(seq_id_dst);
tail_dst.tail = -1; tail_dst.tail = -1;
@ -231,7 +234,7 @@ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_
} }
} }
if (tail_src.tail >= 0) { if (tail_src.tail >= 0) {
kv_cell & cell_src = cells[tail_src.tail]; auto & cell_src = cells[tail_src.tail];
cell_src.seq_id.insert(seq_id_dst); cell_src.seq_id.insert(seq_id_dst);
tail_dst.tail = tail_src.tail; tail_dst.tail = tail_src.tail;
@ -239,7 +242,7 @@ void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_
} }
} }
void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) { void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) {
uint32_t new_head = size; uint32_t new_head = size;
for (uint32_t i = 0; i < size; ++i) { for (uint32_t i = 0; i < size; ++i) {
@ -271,7 +274,7 @@ void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) {
} }
} }
void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
if (shift == 0) { if (shift == 0) {
return; return;
} }
@ -293,7 +296,7 @@ void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_
if (0 <= seq_id && seq_id < (int64_t) size) { if (0 <= seq_id && seq_id < (int64_t) size) {
const int32_t tail_id = cells[seq_id].tail; const int32_t tail_id = cells[seq_id].tail;
if (tail_id >= 0) { if (tail_id >= 0) {
kv_cell & cell = cells[tail_id]; auto & cell = cells[tail_id];
if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
cell.pos += shift; cell.pos += shift;
} }
@ -301,7 +304,7 @@ void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_
} }
} }
void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
if (d == 1) { if (d == 1) {
return; return;
} }
@ -323,7 +326,7 @@ void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_
if (0 <= seq_id && seq_id < (int64_t) size) { if (0 <= seq_id && seq_id < (int64_t) size) {
const int32_t tail_id = cells[seq_id].tail; const int32_t tail_id = cells[seq_id].tail;
if (tail_id >= 0) { if (tail_id >= 0) {
kv_cell & cell = cells[tail_id]; auto & cell = cells[tail_id];
if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
cell.pos /= d; cell.pos /= d;
} }
@ -331,7 +334,7 @@ void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_
} }
} }
llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const { llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const {
llama_pos result = std::numeric_limits<llama_pos>::max(); llama_pos result = std::numeric_limits<llama_pos>::max();
for (uint32_t i = 0; i < size; ++i) { for (uint32_t i = 0; i < size; ++i) {
@ -347,7 +350,7 @@ llama_pos llama_kv_cache_recurrent::seq_pos_min(llama_seq_id seq_id) const {
return result; return result;
} }
llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const {
llama_pos result = -1; llama_pos result = -1;
for (uint32_t i = 0; i < size; ++i) { for (uint32_t i = 0; i < size; ++i) {
@@ -359,43 +362,45 @@ llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const {
return result;
}
- llama_memory_state_ptr llama_kv_cache_recurrent::init_batch(const llama_batch & batch, uint32_t n_ubatch, bool embd_all) {
- auto sbatch = llama_sbatch(batch, hparams.n_embd, false);
+ llama_memory_state_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
std::vector<llama_ubatch> ubatches;
- while (sbatch.n_tokens > 0) {
+ while (true) {
llama_ubatch ubatch;
if (embd_all) {
// if all tokens are output, split by sequence
- ubatch = sbatch.split_seq(n_ubatch);
+ ubatch = balloc.split_seq(n_ubatch);
} else {
- ubatch = sbatch.split_equal(n_ubatch);
+ ubatch = balloc.split_equal(n_ubatch);
}
- ubatches.push_back(ubatch);
+ if (ubatch.n_tokens == 0) {
+ break;
+ }
+ ubatches.push_back(std::move(ubatch)); // NOLINT
}
if (!prepare(ubatches)) {
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+ return std::make_unique<llama_memory_recurrent_state>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this, std::move(sbatch), std::move(ubatches));
+ return std::make_unique<llama_memory_recurrent_state>(this, std::move(ubatches));
}
- llama_memory_state_ptr llama_kv_cache_recurrent::init_full() {
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_SUCCESS, this);
+ llama_memory_state_ptr llama_memory_recurrent::init_full() {
+ return std::make_unique<llama_memory_recurrent_state>(this);
}
- llama_memory_state_ptr llama_kv_cache_recurrent::init_update(llama_context * lctx, bool optimize) {
+ llama_memory_state_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) {
GGML_UNUSED(lctx);
GGML_UNUSED(optimize);
- return std::make_unique<llama_kv_cache_recurrent_state>(LLAMA_MEMORY_STATUS_NO_UPDATE);
+ return std::make_unique<llama_memory_recurrent_state>(LLAMA_MEMORY_STATUS_NO_UPDATE);
}
- bool llama_kv_cache_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
+ bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
// simply remember the full state because it is very small for this type of cache
// TODO: optimize
auto org_cells = cells;
@ -419,10 +424,9 @@ bool llama_kv_cache_recurrent::prepare(const std::vector<llama_ubatch> & ubatche
return success; return success;
} }
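prepare() above uses a simple checkpoint-and-rollback idea: snapshot the small cell metadata, dry-run find_slot for every ubatch, then always restore. A standalone sketch of that pattern with toy types (only the control flow mirrors the real code):

#include <vector>

struct toy_cell  { int pos = -1; };
struct toy_batch { int n_tokens = 0; };

struct toy_cache {
    std::vector<toy_cell> cells;

    bool find_slot(const toy_batch & ub) {
        return ub.n_tokens > 0; // placeholder placement check
    }

    bool prepare(const std::vector<toy_batch> & ubatches) {
        const auto org_cells = cells; // cheap snapshot: the metadata is small

        bool success = true;
        for (const auto & ub : ubatches) {
            if (!find_slot(ub)) {     // dry-run placement, mutating the metadata
                success = false;
                break;
            }
        }

        cells = org_cells;            // always roll back; apply() redoes the placement later
        return success;
    }
};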
bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) { bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
const uint32_t n_seqs = ubatch.n_seqs;
const uint32_t n_seq_tokens = ubatch.n_seq_tokens; const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
const uint32_t n_seqs = ubatch.n_seqs;
// if we have enough unused cells before the current head -> // if we have enough unused cells before the current head ->
// better to start searching from the beginning of the cache, hoping to fill it // better to start searching from the beginning of the cache, hoping to fill it
@ -442,9 +446,11 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
// everything should fit if all seq_ids are smaller than the max // everything should fit if all seq_ids are smaller than the max
for (uint32_t s = 0; s < n_seqs; ++s) { for (uint32_t s = 0; s < n_seqs; ++s) {
const uint32_t n_seq_id = ubatch.n_seq_id[s]; const uint32_t i = s*n_seq_tokens; // first token of sequence set s
const uint32_t n_seq_id = ubatch.n_seq_id[i];
for (uint32_t j = 0; j < n_seq_id; ++j) { for (uint32_t j = 0; j < n_seq_id; ++j) {
const llama_seq_id seq_id = ubatch.seq_id[s][j]; const llama_seq_id seq_id = ubatch.seq_id[i][j];
if (seq_id < 0 || (uint32_t) seq_id >= size) { if (seq_id < 0 || (uint32_t) seq_id >= size) {
// too big seq_id // too big seq_id
@ -453,9 +459,9 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
return false; return false;
} }
if (j > 0) { if (j > 0) {
kv_cell & seq = cells[seq_id]; auto & seq = cells[seq_id];
if (seq.tail >= 0) { if (seq.tail >= 0) {
kv_cell & cell = cells[seq.tail]; auto & cell = cells[seq.tail];
// clear cells from seq_ids that become shared // clear cells from seq_ids that become shared
// (should not normally happen, but let's handle it anyway) // (should not normally happen, but let's handle it anyway)
cell.seq_id.erase(seq_id); cell.seq_id.erase(seq_id);
@ -475,7 +481,7 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
std::vector<int32_t> tails_verif; std::vector<int32_t> tails_verif;
tails_verif.assign(size, -1); tails_verif.assign(size, -1);
for (uint32_t i = 0; i < size; ++i) { for (uint32_t i = 0; i < size; ++i) {
kv_cell & cell = cells[i]; auto & cell = cells[i];
for (llama_seq_id seq_id : cell.seq_id) { for (llama_seq_id seq_id : cell.seq_id) {
if (tails_verif[seq_id] != -1) { if (tails_verif[seq_id] != -1) {
LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
@ -496,28 +502,29 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
for (uint32_t i = 0; i < size; ++i) { for (uint32_t i = 0; i < size; ++i) {
if (next_empty_cell >= size) { next_empty_cell -= size; } if (next_empty_cell >= size) { next_empty_cell -= size; }
kv_cell & cell = cells[next_empty_cell]; auto & cell = cells[next_empty_cell];
if (cell.is_empty()) { break; } if (cell.is_empty()) { break; }
next_empty_cell += 1; next_empty_cell += 1;
} }
// find usable cell range // find usable cell range
for (uint32_t s = 0; s < n_seqs; ++s) { for (uint32_t s = 0; s < n_seqs; ++s) {
const llama_seq_id seq_id = ubatch.seq_id[s][0]; const uint32_t i = s*n_seq_tokens;
kv_cell & seq_meta = cells[seq_id]; const llama_seq_id seq_id = ubatch.seq_id[i][0];
auto & seq_meta = cells[seq_id];
bool has_cell = false; bool has_cell = false;
if (seq_meta.tail >= 0) { if (seq_meta.tail >= 0) {
kv_cell & cell = cells[seq_meta.tail]; auto & cell = cells[seq_meta.tail];
GGML_ASSERT(cell.has_seq_id(seq_id)); GGML_ASSERT(cell.has_seq_id(seq_id));
// does this seq_id "own" the cell? // does this seq_id "own" the cell?
if (cell.seq_id.size() == 1) { has_cell = true; } if (cell.seq_id.size() == 1) { has_cell = true; }
} }
if (!has_cell) { if (!has_cell) {
kv_cell & empty_cell = cells[next_empty_cell]; auto & empty_cell = cells[next_empty_cell];
GGML_ASSERT(empty_cell.is_empty()); GGML_ASSERT(empty_cell.is_empty());
// copy old tail into the empty cell // copy old tail into the empty cell
if (seq_meta.tail >= 0) { if (seq_meta.tail >= 0) {
kv_cell & orig_cell = cells[seq_meta.tail]; auto & orig_cell = cells[seq_meta.tail];
empty_cell.pos = orig_cell.pos; empty_cell.pos = orig_cell.pos;
empty_cell.src = orig_cell.src; empty_cell.src = orig_cell.src;
orig_cell.seq_id.erase(seq_id); orig_cell.seq_id.erase(seq_id);
@ -527,10 +534,10 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
seq_meta.tail = next_empty_cell; seq_meta.tail = next_empty_cell;
// find next empty cell // find next empty cell
if (s + 1 < n_seqs) { if (s + 1 < n_seqs) {
for (uint32_t i = 0; i < size; ++i) { for (uint32_t j = 0; j < size; ++j) {
next_empty_cell += 1; next_empty_cell += 1;
if (next_empty_cell >= size) { next_empty_cell -= size; } if (next_empty_cell >= size) { next_empty_cell -= size; }
kv_cell & cell = cells[next_empty_cell]; auto & cell = cells[next_empty_cell];
if (cell.is_empty()) { break; } if (cell.is_empty()) { break; }
} }
} }
@ -541,19 +548,20 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
// gather and re-order // gather and re-order
for (uint32_t s = 0; s < n_seqs; ++s) { for (uint32_t s = 0; s < n_seqs; ++s) {
const uint32_t i = s*n_seq_tokens;
const int32_t dst_id = s + min; const int32_t dst_id = s + min;
const int32_t src_id = cells[ubatch.seq_id[s][0]].tail; const int32_t src_id = cells[ubatch.seq_id[i][0]].tail;
if (dst_id != src_id) { if (dst_id != src_id) {
kv_cell & dst_cell = cells[dst_id]; auto & dst_cell = cells[dst_id];
kv_cell & src_cell = cells[src_id]; auto & src_cell = cells[src_id];
std::swap(dst_cell.pos, src_cell.pos); std::swap(dst_cell.pos, src_cell.pos);
std::swap(dst_cell.src, src_cell.src); std::swap(dst_cell.src, src_cell.src);
std::swap(dst_cell.seq_id, src_cell.seq_id); std::swap(dst_cell.seq_id, src_cell.seq_id);
// swap tails // swap tails
for (uint32_t i = 0; i < size; ++i) { for (uint32_t j = 0; j < size; ++j) {
int32_t & tail = cells[i].tail; int32_t & tail = cells[j].tail;
if (tail == src_id) { if (tail == src_id) {
tail = dst_id; tail = dst_id;
} else if (tail == dst_id) { } else if (tail == dst_id) {
@ -565,20 +573,21 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
// update the pos of the used seqs // update the pos of the used seqs
for (uint32_t s = 0; s < n_seqs; ++s) { for (uint32_t s = 0; s < n_seqs; ++s) {
const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1]; const uint32_t i = s*n_seq_tokens;
const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1];
const int32_t cell_id = s + min; const int32_t cell_id = s + min;
kv_cell & cell = cells[cell_id]; auto & cell = cells[cell_id];
if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
// What should happen when the pos backtracks or skips a value? // What should happen when the pos backtracks or skips a value?
// Clearing the state mid-batch would require special-casing which isn't done. // Clearing the state mid-batch would require special-casing which isn't done.
LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
__func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens); __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens);
} }
cell.pos = last_pos; cell.pos = last_pos;
cell.seq_id.clear(); cell.seq_id.clear();
for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) { for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) {
const llama_seq_id seq_id = ubatch.seq_id[s][j]; const llama_seq_id seq_id = ubatch.seq_id[i][j];
cell.seq_id.insert(seq_id); cell.seq_id.insert(seq_id);
cells[seq_id].tail = cell_id; cells[seq_id].tail = cell_id;
} }
@ -620,18 +629,18 @@ bool llama_kv_cache_recurrent::find_slot(const llama_ubatch & ubatch) {
head = min; head = min;
n = max - min + 1; n = max - min + 1;
used = std::count_if(cells.begin(), cells.end(), used = std::count_if(cells.begin(), cells.end(),
[](const kv_cell & cell){ return !cell.is_empty(); }); [](const mem_cell & cell){ return !cell.is_empty(); });
// sanity check // sanity check
return n >= n_seqs; return n >= n_seqs;
} }
bool llama_kv_cache_recurrent::get_can_shift() const { bool llama_memory_recurrent::get_can_shift() const {
// shifting the pos is trivial for recurrent models // shifting the pos is trivial for recurrent models
return true; return true;
} }
size_t llama_kv_cache_recurrent::total_size() const { size_t llama_memory_recurrent::total_size() const {
size_t size = 0; size_t size = 0;
for (const auto & buf : bufs) { for (const auto & buf : bufs) {
size += ggml_backend_buffer_get_size(buf.get()); size += ggml_backend_buffer_get_size(buf.get());
@ -640,27 +649,31 @@ size_t llama_kv_cache_recurrent::total_size() const {
return size; return size;
} }
size_t llama_kv_cache_recurrent::size_k_bytes() const { size_t llama_memory_recurrent::size_r_bytes() const {
size_t size_k_bytes = 0; size_t size_r_bytes = 0;
for (const auto & k : k_l) { for (const auto & r : r_l) {
size_k_bytes += ggml_nbytes(k); if (r != nullptr) {
size_r_bytes += ggml_nbytes(r);
}
} }
return size_k_bytes; return size_r_bytes;
} }
size_t llama_kv_cache_recurrent::size_v_bytes() const { size_t llama_memory_recurrent::size_s_bytes() const {
size_t size_v_bytes = 0; size_t size_s_bytes = 0;
for (const auto & v : v_l) { for (const auto & s : s_l) {
size_v_bytes += ggml_nbytes(v); if (s != nullptr) {
size_s_bytes += ggml_nbytes(s);
}
} }
return size_v_bytes; return size_s_bytes;
} }
void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
uint32_t cell_count = 0; uint32_t cell_count = 0;
@ -698,7 +711,7 @@ void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id s
state_write_data(io, cell_ranges); state_write_data(io, cell_ranges);
} }
void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) { void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
uint32_t cell_count; uint32_t cell_count;
io.read_to(&cell_count, sizeof(cell_count)); io.read_to(&cell_count, sizeof(cell_count));
@ -717,7 +730,7 @@ void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq
} }
} }
void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const { void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
for (const auto & range : cell_ranges) { for (const auto & range : cell_ranges) {
for (uint32_t i = range.first; i < range.second; ++i) { for (uint32_t i = range.first; i < range.second; ++i) {
const auto & cell = cells[i]; const auto & cell = cells[i];
@ -736,11 +749,11 @@ void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std
} }
} }
void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const { void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
const uint32_t v_trans = 0; const uint32_t s_trans = 0;
const uint32_t n_layer = hparams.n_layer; const uint32_t n_layer = hparams.n_layer;
io.write(&v_trans, sizeof(v_trans)); io.write(&s_trans, sizeof(s_trans));
io.write(&n_layer, sizeof(n_layer)); io.write(&n_layer, sizeof(n_layer));
std::vector<uint8_t> tmp_buf; std::vector<uint8_t> tmp_buf;
@ -748,86 +761,81 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
// Iterate and write all the keys first, each row is a cell // Iterate and write all the keys first, each row is a cell
// Get whole range at a time // Get whole range at a time
for (uint32_t il = 0; il < n_layer; ++il) { for (uint32_t il = 0; il < n_layer; ++il) {
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
// Write key type // Write key type
const int32_t k_type_i = (int32_t)k_l[il]->type; const int32_t r_type_i = (int32_t)r_l[il]->type;
io.write(&k_type_i, sizeof(k_type_i)); io.write(&r_type_i, sizeof(r_type_i));
// Write row size of key // Write row size of key
const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
io.write(&k_size_row, sizeof(k_size_row)); io.write(&r_size_row, sizeof(r_size_row));
// Read each range of cells of k_size length each into tmp_buf and write out // Read each range of cells of k_size length each into tmp_buf and write out
for (const auto & range : cell_ranges) { for (const auto & range : cell_ranges) {
const size_t range_size = range.second - range.first; const size_t range_size = range.second - range.first;
const size_t buf_size = range_size * k_size_row; const size_t buf_size = range_size * r_size_row;
io.write_tensor(k_l[il], range.first * k_size_row, buf_size); io.write_tensor(r_l[il], range.first * r_size_row, buf_size);
} }
} }
if (!v_trans) { if (!s_trans) {
for (uint32_t il = 0; il < n_layer; ++il) { for (uint32_t il = 0; il < n_layer; ++il) {
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
// Write value type // Write value type
const int32_t v_type_i = (int32_t)v_l[il]->type; const int32_t s_type_i = (int32_t)s_l[il]->type;
io.write(&v_type_i, sizeof(v_type_i)); io.write(&s_type_i, sizeof(s_type_i));
// Write row size of value // Write row size of value
const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
io.write(&v_size_row, sizeof(v_size_row)); io.write(&s_size_row, sizeof(s_size_row));
// Read each range of cells of v_size length each into tmp_buf and write out // Read each range of cells of s_size length each into tmp_buf and write out
for (const auto & range : cell_ranges) { for (const auto & range : cell_ranges) {
const size_t range_size = range.second - range.first; const size_t range_size = range.second - range.first;
const size_t buf_size = range_size * v_size_row; const size_t buf_size = range_size * s_size_row;
io.write_tensor(v_l[il], range.first * v_size_row, buf_size); io.write_tensor(s_l[il], range.first * s_size_row, buf_size);
} }
} }
} else { } else {
// When v is transposed, we also need the element size and get the element ranges from each row // When v is transposed, we also need the element size and get the element ranges from each row
const uint32_t kv_size = size; const uint32_t mem_size = size;
for (uint32_t il = 0; il < n_layer; ++il) { for (uint32_t il = 0; il < n_layer; ++il) {
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_s = hparams.n_embd_s();
// Write value type // Write value type
const int32_t v_type_i = (int32_t)v_l[il]->type; const int32_t s_type_i = (int32_t)s_l[il]->type;
io.write(&v_type_i, sizeof(v_type_i)); io.write(&s_type_i, sizeof(s_type_i));
// Write element size // Write element size
const uint32_t v_size_el = ggml_type_size(v_l[il]->type); const uint32_t s_size_el = ggml_type_size(s_l[il]->type);
io.write(&v_size_el, sizeof(v_size_el)); io.write(&s_size_el, sizeof(s_size_el));
// Write GQA embedding size // Write GQA embedding size
io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); io.write(&n_embd_s, sizeof(n_embd_s));
// For each row, we get the element values of each cell // For each row, we get the element values of each cell
for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { for (uint32_t j = 0; j < n_embd_s; ++j) {
// Read each range of cells of v_size_el length each into tmp_buf and write out // Read each range of cells of v_size_el length each into tmp_buf and write out
for (const auto & range : cell_ranges) { for (const auto & range : cell_ranges) {
const size_t range_size = range.second - range.first; const size_t range_size = range.second - range.first;
const size_t src_offset = (range.first + j * kv_size) * v_size_el; const size_t src_offset = (range.first + j * mem_size) * s_size_el;
const size_t buf_size = range_size * v_size_el; const size_t buf_size = range_size * s_size_el;
io.write_tensor(v_l[il], src_offset, buf_size); io.write_tensor(s_l[il], src_offset, buf_size);
} }
} }
} }
} }
} }
- bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
+ bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
if (dest_seq_id != -1) {
// single sequence
seq_rm(dest_seq_id, -1, -1);
- llama_sbatch sbatch;
- llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
- batch.n_tokens = cell_count;
- batch.n_seq_tokens = cell_count;
- batch.n_seqs = 1;
+ llama_batch_allocr balloc(hparams.n_pos_per_embd());
+ llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
for (uint32_t i = 0; i < cell_count; ++i) {
llama_pos pos;
@@ -841,12 +849,12 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce
return false;
}
- batch.pos[i] = pos;
+ ubatch.pos[i] = pos;
}
- batch.n_seq_id[0] = 1;
- batch.seq_id[0] = &dest_seq_id;
- if (!find_slot(batch)) {
+ ubatch.n_seq_id[0] = 1;
+ ubatch.seq_id[0] = &dest_seq_id;
+ if (!find_slot(ubatch)) {
LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
return false;
}
@@ -854,8 +862,8 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce
// DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
// Assume that this is one contiguous block of cells
GGML_ASSERT(head + cell_count <= size);
- GGML_ASSERT(cells[head].pos == batch.pos[0]);
- GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]);
+ GGML_ASSERT(cells[head].pos == ubatch.pos[0]);
+ GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]);
GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
} else {
@ -869,7 +877,7 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce
clear(true); clear(true);
for (uint32_t i = 0; i < cell_count; ++i) { for (uint32_t i = 0; i < cell_count; ++i) {
kv_cell & cell = cells[i]; auto & cell = cells[i];
llama_pos pos; llama_pos pos;
uint32_t n_seq_id; uint32_t n_seq_id;
@ -883,7 +891,7 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce
llama_seq_id seq_id; llama_seq_id seq_id;
io.read_to(&seq_id, sizeof(seq_id)); io.read_to(&seq_id, sizeof(seq_id));
// TODO: llama_kv_cache_recurrent should have a notion of max sequences // TODO: llama_memory_recurrent should have a notion of max sequences
//if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
if (seq_id < 0) { if (seq_id < 0) {
//LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
@ -915,10 +923,10 @@ bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t ce
return true; return true;
} }
bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) { bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
uint32_t v_trans; uint32_t s_trans;
uint32_t n_layer; uint32_t n_layer;
io.read_to(&v_trans, sizeof(v_trans)); io.read_to(&s_trans, sizeof(s_trans));
io.read_to(&n_layer, sizeof(n_layer)); io.read_to(&n_layer, sizeof(n_layer));
if (n_layer != hparams.n_layer) { if (n_layer != hparams.n_layer) {
@ -929,102 +937,100 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
return false; return false;
} }
if (false != (bool) v_trans) { if (false != (bool) s_trans) {
LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); LLAMA_LOG_ERROR("%s: incompatible s transposition\n", __func__);
return false; return false;
} }
// For each layer, read the keys for each cell, one row is one cell, read as one contiguous block // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
for (uint32_t il = 0; il < n_layer; ++il) { for (uint32_t il = 0; il < n_layer; ++il) {
const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
// Read type of key // Read type of key
int32_t k_type_i_ref; int32_t r_type_i_ref;
io.read_to(&k_type_i_ref, sizeof(k_type_i_ref)); io.read_to(&r_type_i_ref, sizeof(r_type_i_ref));
const int32_t k_type_i = (int32_t) k_l[il]->type; const int32_t r_type_i = (int32_t) r_l[il]->type;
if (k_type_i != k_type_i_ref) { if (r_type_i != r_type_i_ref) {
LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il);
return false; return false;
} }
// Read row size of key // Read row size of key
uint64_t k_size_row_ref; uint64_t r_size_row_ref;
io.read_to(&k_size_row_ref, sizeof(k_size_row_ref)); io.read_to(&r_size_row_ref, sizeof(r_size_row_ref));
const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
if (k_size_row != k_size_row_ref) { if (r_size_row != r_size_row_ref) {
LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il);
return false; return false;
} }
if (cell_count) { if (cell_count) {
// Read and set the keys for the whole cell range // Read and set the keys for the whole cell range
ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row); ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row);
} }
} }
if (!v_trans) { if (!s_trans) {
for (uint32_t il = 0; il < n_layer; ++il) { for (uint32_t il = 0; il < n_layer; ++il) {
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
// Read type of value // Read type of value
int32_t v_type_i_ref; int32_t s_type_i_ref;
io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
const int32_t v_type_i = (int32_t)v_l[il]->type; const int32_t s_type_i = (int32_t)s_l[il]->type;
if (v_type_i != v_type_i_ref) { if (s_type_i != s_type_i_ref) {
LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
return false; return false;
} }
// Read row size of value // Read row size of value
uint64_t v_size_row_ref; uint64_t s_size_row_ref;
io.read_to(&v_size_row_ref, sizeof(v_size_row_ref)); io.read_to(&s_size_row_ref, sizeof(s_size_row_ref));
const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
if (v_size_row != v_size_row_ref) { if (s_size_row != s_size_row_ref) {
LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il);
return false; return false;
} }
if (cell_count) { if (cell_count) {
// Read and set the values for the whole cell range // Read and set the values for the whole cell range
ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row); ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count * s_size_row);
} }
} }
} else { } else {
// For each layer, read the values for each cell (transposed) // For each layer, read the values for each cell (transposed)
for (uint32_t il = 0; il < n_layer; ++il) { for (uint32_t il = 0; il < n_layer; ++il) {
const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); const uint32_t n_embd_s = hparams.n_embd_s();
// Read type of value // Read type of value
int32_t v_type_i_ref; int32_t s_type_i_ref;
io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
const int32_t v_type_i = (int32_t)v_l[il]->type; const int32_t s_type_i = (int32_t)s_l[il]->type;
if (v_type_i != v_type_i_ref) { if (s_type_i != s_type_i_ref) {
LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
return false; return false;
} }
// Read element size of value // Read element size of value
uint32_t v_size_el_ref; uint32_t s_size_el_ref;
io.read_to(&v_size_el_ref, sizeof(v_size_el_ref)); io.read_to(&s_size_el_ref, sizeof(s_size_el_ref));
const size_t v_size_el = ggml_type_size(v_l[il]->type); const size_t s_size_el = ggml_type_size(s_l[il]->type);
if (v_size_el != v_size_el_ref) { if (s_size_el != s_size_el_ref) {
LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il);
return false; return false;
} }
// Read GQA embedding size // Read state embedding size
uint32_t n_embd_v_gqa_ref; uint32_t n_embd_s_ref;
io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref));
if (n_embd_v_gqa != n_embd_v_gqa_ref) { if (n_embd_s != n_embd_s_ref) {
LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il);
return false; return false;
} }
if (cell_count) { if (cell_count) {
// For each row in the transposed matrix, read the values for the whole cell range // For each row in the transposed matrix, read the values for the whole cell range
for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { for (uint32_t j = 0; j < n_embd_s; ++j) {
const size_t dst_offset = (head + j * size) * v_size_el; const size_t dst_offset = (head + j * size) * s_size_el;
ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el);
} }
} }
} }
@ -1034,25 +1040,22 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
} }
// //
// llama_kv_cache_recurrent_state // llama_memory_recurrent_state
// //
llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state(llama_memory_status status) : status(status) {} llama_memory_recurrent_state::llama_memory_recurrent_state(llama_memory_status status) : status(status) {}
llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state( llama_memory_recurrent_state::llama_memory_recurrent_state(
llama_memory_status status, llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) {
llama_kv_cache_recurrent * kv) : status(status), kv(kv), is_full(true) {
} }
llama_kv_cache_recurrent_state::llama_kv_cache_recurrent_state( llama_memory_recurrent_state::llama_memory_recurrent_state(
llama_memory_status status, llama_memory_recurrent * mem,
llama_kv_cache_recurrent * kv, std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {}
llama_sbatch sbatch,
std::vector<llama_ubatch> ubatches) : status(status), kv(kv), sbatch(std::move(sbatch)), ubatches(std::move(ubatches)) {}
llama_kv_cache_recurrent_state::~llama_kv_cache_recurrent_state() = default; llama_memory_recurrent_state::~llama_memory_recurrent_state() = default;
bool llama_kv_cache_recurrent_state::next() { bool llama_memory_recurrent_state::next() {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS); assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
if (++i_next >= ubatches.size()) { if (++i_next >= ubatches.size()) {
@ -1062,54 +1065,48 @@ bool llama_kv_cache_recurrent_state::next() {
return true; return true;
} }
bool llama_kv_cache_recurrent_state::apply() { bool llama_memory_recurrent_state::apply() {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS); assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
kv->find_slot(ubatches[i_next]); mem->find_slot(ubatches[i_next]);
return true; return true;
} }
std::vector<int64_t> & llama_kv_cache_recurrent_state::out_ids() { llama_memory_status llama_memory_recurrent_state::get_status() const {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
return sbatch.out_ids;
}
llama_memory_status llama_kv_cache_recurrent_state::get_status() const {
return status; return status;
} }
const llama_ubatch & llama_kv_cache_recurrent_state::get_ubatch() const { const llama_ubatch & llama_memory_recurrent_state::get_ubatch() const {
assert(status == LLAMA_MEMORY_STATUS_SUCCESS); assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
return ubatches[i_next]; return ubatches[i_next];
} }
uint32_t llama_kv_cache_recurrent_state::get_n_kv() const { uint32_t llama_memory_recurrent_state::get_n_rs() const {
return is_full ? kv->size : kv->n; return is_full ? mem->size : mem->n;
} }
uint32_t llama_kv_cache_recurrent_state::get_head() const { uint32_t llama_memory_recurrent_state::get_head() const {
return is_full ? 0 : kv->head; return is_full ? 0 : mem->head;
} }
int32_t llama_kv_cache_recurrent_state::get_rs_z() const { int32_t llama_memory_recurrent_state::get_rs_z() const {
return is_full ? 0 : kv->rs_z; return is_full ? 0 : mem->rs_z;
} }
uint32_t llama_kv_cache_recurrent_state::get_size() const { uint32_t llama_memory_recurrent_state::get_size() const {
return kv->size; return mem->size;
} }
ggml_tensor * llama_kv_cache_recurrent_state::get_k_l(int32_t il) const { ggml_tensor * llama_memory_recurrent_state::get_r_l(int32_t il) const {
return kv->k_l[il]; return mem->r_l[il];
} }
ggml_tensor * llama_kv_cache_recurrent_state::get_v_l(int32_t il) const { ggml_tensor * llama_memory_recurrent_state::get_s_l(int32_t il) const {
return kv->v_l[il]; return mem->s_l[il];
} }
int32_t llama_kv_cache_recurrent_state::s_copy(int i) const { int32_t llama_memory_recurrent_state::s_copy(int i) const {
return kv->cells[i + kv->head].src0; return mem->cells[i + mem->head].src0;
} }
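Note: the renamed llama_memory_recurrent_state keeps the same shape of API, only with recurrent-flavoured names (get_n_rs, get_r_l, get_s_l replacing get_n_kv, get_k_l, get_v_l). The sketch below is illustrative only, assuming a state object that was created from a batch and has already had apply() called for the current ubatch; the helper name and surrounding context are not part of the diff.

    // illustrative helper: pull the per-layer recurrent tensors and copy sources
    static void read_recurrent_view(const llama_memory_recurrent_state & mstate, int32_t il) {
        const uint32_t n_rs = mstate.get_n_rs();  // number of cells in the current view
        const uint32_t head = mstate.get_head();  // first cell of the view
        const int32_t  rs_z = mstate.get_rs_z();  // cell whose state must be zeroed, if any

        ggml_tensor * r = mstate.get_r_l(il);     // per-layer "r" tensor (was k_l)
        ggml_tensor * s = mstate.get_s_l(il);     // per-layer "s" tensor (was v_l)

        for (uint32_t i = 0; i < n_rs; ++i) {
            const int32_t src = mstate.s_copy(i); // source cell for slot i (cells[i + head].src0)
            (void) src;                           // a real caller would write this into an input tensor
        }

        (void) head; (void) rs_z; (void) r; (void) s;
    }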


@@ -8,29 +8,34 @@
 #include <vector>

 //
-// llama_kv_cache_recurrent
+// llama_memory_recurrent
 //

-// TODO: extract the KV cache state used for graph computation into llama_kv_cache_recurrent_state_i
+// TODO: extract the cache state used for graph computation into llama_memory_recurrent_state_i
 //       see the implementation of llama_kv_cache_unified_state_i for an example how to do it
-class llama_kv_cache_recurrent : public llama_memory_i {
+class llama_memory_recurrent : public llama_memory_i {
 public:
-    llama_kv_cache_recurrent(
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
+    llama_memory_recurrent(
             const llama_model & model,
-                    ggml_type   type_k,
-                    ggml_type   type_v,
+             layer_filter_cb && filter,
+                    ggml_type   type_r,
+                    ggml_type   type_s,
                          bool   offload,
-                     uint32_t   kv_size,
+                     uint32_t   mem_size,
                      uint32_t   n_seq_max);

-    ~llama_kv_cache_recurrent() = default;
+    ~llama_memory_recurrent() = default;

     //
     // llama_memory_i
     //

     llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
+            llama_batch_allocr & balloc,
             uint32_t n_ubatch,
             bool embd_all) override;
@@ -51,7 +56,7 @@ public:
     bool prepare(const std::vector<llama_ubatch> & ubatches);

-    // find a contiguous slot of kv cells and emplace the ubatch there
+    // find a contiguous slot of memory cells and emplace the ubatch there
     bool find_slot(const llama_ubatch & ubatch);

     bool get_can_shift() const override;
@@ -72,7 +77,7 @@ public:
     int32_t rs_z = -1;

     // TODO: optimize for recurrent state needs
-    struct kv_cell {
+    struct mem_cell {
         llama_pos pos  = -1;
         int32_t   src  = -1; // used to know where states should be copied from
         int32_t   src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
@@ -88,15 +93,16 @@ public:
             return seq_id.empty();
         }

-        bool is_same_seq(const kv_cell & other) const {
+        bool is_same_seq(const mem_cell & other) const {
             return seq_id == other.seq_id;
         }
     };

-    std::vector<kv_cell> cells;
+    std::vector<mem_cell> cells;

-    std::vector<ggml_tensor *> k_l; // per layer
-    std::vector<ggml_tensor *> v_l;
+    // per layer
+    std::vector<ggml_tensor *> r_l;
+    std::vector<ggml_tensor *> s_l;

 private:
     //const llama_model & model;
@@ -109,8 +115,8 @@ private:
     size_t total_size() const;

-    size_t size_k_bytes() const;
-    size_t size_v_bytes() const;
+    size_t size_r_bytes() const;
+    size_t size_s_bytes() const;

     void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
     void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
@@ -119,24 +125,21 @@ private:
     bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
 };

-class llama_kv_cache_recurrent_state : public llama_memory_state_i {
+class llama_memory_recurrent_state : public llama_memory_state_i {
 public:
     // used for errors
-    llama_kv_cache_recurrent_state(llama_memory_status status);
+    llama_memory_recurrent_state(llama_memory_status status);

     // used to create a full-cache state
-    llama_kv_cache_recurrent_state(
-            llama_memory_status status,
-            llama_kv_cache_recurrent * kv);
+    llama_memory_recurrent_state(
+            llama_memory_recurrent * mem);

     // used to create a state from a batch
-    llama_kv_cache_recurrent_state(
-            llama_memory_status status,
-            llama_kv_cache_recurrent * kv,
-            llama_sbatch sbatch,
+    llama_memory_recurrent_state(
+            llama_memory_recurrent * mem,
             std::vector<llama_ubatch> ubatches);

-    virtual ~llama_kv_cache_recurrent_state();
+    virtual ~llama_memory_recurrent_state();

     //
     // llama_memory_state_i
@@ -145,31 +148,27 @@ public:
     //

     bool next()  override;
     bool apply() override;

-    std::vector<int64_t> & out_ids() override;
-
     llama_memory_status  get_status() const override;
     const llama_ubatch & get_ubatch() const override;

     //
-    // llama_kv_cache_recurrent_state specific API
+    // llama_memory_recurrent_state specific API
     //

-    uint32_t get_n_kv() const;
+    uint32_t get_n_rs() const;
     uint32_t get_head() const;
     int32_t  get_rs_z() const;
     uint32_t get_size() const;

-    ggml_tensor * get_k_l(int32_t il) const;
-    ggml_tensor * get_v_l(int32_t il) const;
+    ggml_tensor * get_r_l(int32_t il) const;
+    ggml_tensor * get_s_l(int32_t il) const;

     int32_t s_copy(int i) const;

 private:
     const llama_memory_status status;

-    llama_kv_cache_recurrent * kv;
-
-    llama_sbatch sbatch;
+    llama_memory_recurrent * mem;

     size_t i_next = 0;
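Note: the constructor now takes a layer filter, so hybrid models can keep only their recurrent layers in this cache. A hedged construction sketch follows; the filter, tensor types and sizes below are placeholders invented for illustration, not values taken from the diff.

    // illustrative only: keep even-numbered layers in the recurrent cache
    auto filter = [](int32_t il) {
        return il % 2 == 0;
    };

    llama_memory_recurrent mem(
        model,              // const llama_model & (assumed to exist in the caller)
        std::move(filter),  // layer_filter_cb && filter
        GGML_TYPE_F32,      // ggml_type type_r
        GGML_TYPE_F32,      // ggml_type type_s
        /*offload   =*/ false,
        /*mem_size  =*/ 1024,
        /*n_seq_max =*/ 1);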


@@ -7,6 +7,8 @@
 struct llama_ubatch;

+class llama_batch_allocr;
+
 class llama_io_write_i;
 class llama_io_read_i;
@@ -50,9 +52,6 @@ struct llama_memory_state_i {
     // return false on failure
     virtual bool apply() = 0;

-    // TODO: this might get reworked in the future when refactoring llama_batch
-    virtual std::vector<int64_t> & out_ids() = 0;
-
     // get the current ubatch
     virtual const llama_ubatch & get_ubatch() const = 0;
@@ -71,7 +70,7 @@ struct llama_memory_i {
     // return a state object containing the ubatches and KV cache state required to process them
     // check the llama_memory_state_i::get_status() for the result
     virtual llama_memory_state_ptr init_batch(
-            const llama_batch & batch,
+            llama_batch_allocr & balloc,
             uint32_t n_ubatch,
             bool embd_all) = 0;
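Note: the init_batch() contract is otherwise unchanged: it returns a state object whose status must be checked before the ubatches are processed one at a time with apply()/next(). A rough consumer-side sketch under those assumptions (the graph-building step is elided):

    // sketch of the intended usage pattern of llama_memory_state_i
    static bool run_batch(llama_memory_i & mem, llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
        llama_memory_state_ptr mstate = mem.init_batch(balloc, n_ubatch, embd_all);

        if (!mstate || mstate->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
            return false; // the batch could not be placed into the memory
        }

        do {
            if (!mstate->apply()) {
                return false; // failed to prepare the memory for this ubatch
            }
            const llama_ubatch & ubatch = mstate->get_ubatch();
            (void) ubatch; // ... build and evaluate the compute graph here ...
        } while (mstate->next()); // returns false after the last ubatch

        return true;
    }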


@@ -228,6 +228,7 @@ void llama_model_saver::add_kv_from_model() {
     // add_kv(LLM_KV_TOKENIZER_MASK_ID, ???);
     add_kv(LLM_KV_TOKENIZER_ADD_BOS,                vocab.get_add_bos());
     add_kv(LLM_KV_TOKENIZER_ADD_EOS,                vocab.get_add_eos());
+    add_kv(LLM_KV_TOKENIZER_ADD_SEP,                vocab.get_add_sep());
     add_kv(LLM_KV_TOKENIZER_ADD_PREFIX,             vocab.get_add_space_prefix());
     add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,        vocab.get_remove_extra_whitespaces());
     add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,   vocab.get_precompiled_charsmap());
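Note: the saver now records the SEP flag next to the BOS/EOS flags, and the vocab loader below reads it back via LLM_KV_TOKENIZER_ADD_SEP. Assuming the key string follows the existing tokenizer.ggml.add_bos_token / add_eos_token naming (i.e. tokenizer.ggml.add_sep_token — an assumption, the diff does not show the literal), it could be inspected with the public gguf API:

    // hypothetical inspection of the new flag; the key string is assumed, not taken from the diff
    #include "gguf.h"
    #include <cstdio>

    int main(int argc, char ** argv) {
        if (argc < 2) return 1;

        struct gguf_init_params params = { /*no_alloc =*/ true, /*ctx =*/ nullptr };
        struct gguf_context * ctx = gguf_init_from_file(argv[1], params);
        if (!ctx) return 1;

        const int64_t kid = gguf_find_key(ctx, "tokenizer.ggml.add_sep_token");
        if (kid >= 0) {
            printf("add_sep_token = %s\n", gguf_get_val_bool(ctx, kid) ? "true" : "false");
        } else {
            printf("add_sep_token not present\n");
        }

        gguf_free(ctx);
        return 0;
    }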

File diff suppressed because it is too large.

@@ -1269,6 +1269,7 @@ struct llama_vocab::impl {
     bool add_space_prefix         = false;
     bool add_bos                  = false;
     bool add_eos                  = false;
+    bool add_sep                  = false;
     bool ignore_merges            = false;
     bool clean_spaces             = false;  // clean_up_tokenization_spaces
     bool remove_extra_whitespaces = false;
@@ -1421,6 +1422,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_sep_id  = 102;
             special_pad_id  = 0;
             special_mask_id = 103;
+
+            add_sep = true;
         } else if (tokenizer_model == "gpt2") {
             type = LLAMA_VOCAB_TYPE_BPE;
@@ -1550,12 +1553,15 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 tokenizer_pre == "jina-es"    ||
                 tokenizer_pre == "jina-de"    ||
                 tokenizer_pre == "gigachat"   ||
-                tokenizer_pre == "jina-v1-en" ||
                 tokenizer_pre == "jina-v2-es" ||
-                tokenizer_pre == "jina-v2-de" ||
+                tokenizer_pre == "jina-v2-de") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2;
+            } else if (
+                tokenizer_pre == "jina-v1-en"   ||
                 tokenizer_pre == "jina-v2-code" ||
                 tokenizer_pre == "roberta-bpe") {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2;
+                add_sep = true;
             } else if (
                 tokenizer_pre == "refact") {
                 pre_type = LLAMA_VOCAB_PRE_TYPE_REFACT;
@@ -1665,6 +1671,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             clean_spaces = true;
             add_bos = true;
             add_eos = false;
+            add_sep = true;
         } else if (type == LLAMA_VOCAB_TYPE_UGM) {
             pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
             add_bos = false;
@@ -1801,7 +1808,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         }
     }

-    // Handle add_bos and add_eos
+    // Handle add_bos, add_eos and add_sep
     {
         bool temp = true;
@@ -1811,6 +1818,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
             add_eos = temp;
         }
+        if (ml.get_key(LLM_KV_TOKENIZER_ADD_SEP, temp, false)) {
+            add_sep = temp;
+        }
     }

     // auto-detect special tokens by text
@@ -2060,9 +2070,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
     //NOTE: Per token attributes are missing from the GGUF file.
     //TODO: Extract attributes from GGUF file.
     {
-        auto _contains_any = [] (const std::string & str, const std::vector<std::string> & substrs) -> bool {
+        auto _contains_any = [] (const std::string & str, const std::vector<std::string_view> & substrs) -> bool {
             for (const auto & substr : substrs) {
-                if (str.find(substr) < std::string::npos) {
+                if (str.find(substr) != std::string::npos) {
                     return true;
                 }
             }
@@ -3000,6 +3010,10 @@ bool llama_vocab::get_add_eos() const {
     return pimpl->add_eos;
 }

+bool llama_vocab::get_add_sep() const {
+    return pimpl->add_sep;
+}
+
 bool llama_vocab::get_ignore_merges() const {
     return pimpl->ignore_merges;
 }
@@ -3060,6 +3074,11 @@ int32_t llama_vocab::tokenize(
                    bool   add_special,
                    bool   parse_special) const {
     auto res = tokenize(std::string(text, text_len), add_special, parse_special);
+    if (res.size() >= static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
+        LLAMA_LOG_ERROR("%s: tokenization result size %zu exceeds int32_t limit\n", __func__, res.size());
+        return std::numeric_limits<int32_t>::min();
+    }
+
     if (n_tokens_max < (int) res.size()) {
         // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
         return -((int) res.size());
@@ -3191,6 +3210,10 @@ bool llama_vocab_get_add_eos(const struct llama_vocab * vocab) {
     return vocab->get_add_eos();
 }

+bool llama_vocab_get_add_sep(const struct llama_vocab * vocab) {
+    return vocab->get_add_sep();
+}
+
 llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab) {
     return vocab->token_fim_pre();
 }
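Note: on the application side, the new accessor lets callers mirror the tokenizer configuration when assembling BERT-style inputs (for example appending SEP between two segments for reranking). A small hedged sketch, using the existing public API for the SEP token itself:

    #include "llama.h"
    #include <vector>

    // append SEP between segments only if the vocab asks for it (sketch, not from the diff)
    static void append_sep_if_needed(const llama_vocab * vocab, std::vector<llama_token> & tokens) {
        if (llama_vocab_get_add_sep(vocab)) {
            const llama_token sep = llama_vocab_sep(vocab); // existing API, may return LLAMA_TOKEN_NULL
            if (sep != LLAMA_TOKEN_NULL) {
                tokens.push_back(sep);
            }
        }
    }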


@@ -74,6 +74,7 @@ struct llama_vocab {
     bool get_add_space_prefix          () const;
     bool get_add_bos                   () const;
     bool get_add_eos                   () const;
+    bool get_add_sep                   () const;
     bool get_ignore_merges             () const;
     bool get_clean_spaces              () const;
     bool get_remove_extra_whitespaces  () const;


@@ -1044,6 +1044,7 @@ extern "C" {
     LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab);
     LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab);
+    LLAMA_API bool llama_vocab_get_add_sep(const struct llama_vocab * vocab);

     LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab);
     LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab);
@@ -1087,6 +1088,7 @@ extern "C" {
     /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
     /// @return Returns the number of tokens on success, no more than n_tokens_max
     /// @return Returns a negative number on failure - the number of tokens that would have been returned
+    /// @return Returns INT32_MIN on overflow (e.g., tokenization result size exceeds int32_t limit)
     /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
     /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
     ///                      as plaintext. Does not insert a leading space.
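Note: callers of llama_tokenize() now have three outcomes to distinguish: a non-negative token count, a negative count meaning the buffer was too small (its magnitude is the required size), and INT32_MIN meaning the result cannot be represented at all. A defensive wrapper might look like the sketch below; the two-pass resize pattern is the usual idiom, not something introduced by this diff.

    #include "llama.h"
    #include <cstdint>
    #include <string>
    #include <vector>

    // returns an empty vector on hard failure
    static std::vector<llama_token> tokenize_text(const llama_vocab * vocab, const std::string & text,
                                                  bool add_special, bool parse_special) {
        std::vector<llama_token> tokens(text.size() + 2);

        int32_t n = llama_tokenize(vocab, text.c_str(), (int32_t) text.size(),
                                   tokens.data(), (int32_t) tokens.size(), add_special, parse_special);

        if (n == INT32_MIN) {
            return {}; // overflow: the tokenization result does not fit in int32_t
        }
        if (n < 0) {
            tokens.resize((size_t) -n); // buffer too small: -n is the required size
            n = llama_tokenize(vocab, text.c_str(), (int32_t) text.size(),
                               tokens.data(), (int32_t) tokens.size(), add_special, parse_special);
            if (n < 0) {
                return {};
            }
        }

        tokens.resize((size_t) n);
        return tokens;
    }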


@@ -204,12 +204,17 @@ static inline std::wstring unicode_wstring_from_utf8(const std::string & s) {
     // disable C++17 deprecation warning for std::codecvt_utf8
 #   pragma clang diagnostic push
 #   pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(__GNUC__)
+#   pragma GCC diagnostic push
+#   pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 #endif

     std::wstring_convert<std::codecvt_utf8<wchar_t>> conv;

 #if defined(__clang__)
 #   pragma clang diagnostic pop
+#elif defined(__GNUC__)
+#   pragma GCC diagnostic pop
 #endif

     return conv.from_bytes(s);