#include "llama-graph.h" #include "llama-impl.h" #include "llama-batch.h" #include "llama-cparams.h" #include "llama-kv-cache.h" #include #include #include static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { // TODO move to hparams if a T5 variant appears that uses a different value const int64_t max_distance = 128; if (bidirectional) { n_buckets >>= 1; } const int64_t max_exact = n_buckets >> 1; int32_t relative_position = x - y; int32_t relative_bucket = 0; if (bidirectional) { relative_bucket += (relative_position > 0) * n_buckets; relative_position = abs(relative_position); } else { relative_position = -std::min(relative_position, 0); } int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1); relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large); return relative_bucket; } void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) { if (ubatch->token) { const int64_t n_tokens = ubatch->n_tokens; ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens)); } if (ubatch->embd) { const int64_t n_embd = embd->ne[0]; const int64_t n_tokens = ubatch->n_tokens; ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd)); } } void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { if (ubatch->pos && pos) { const int64_t n_tokens = ubatch->n_tokens; ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_token*ggml_element_size(pos)); } } void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) { if (ubatch->pos && attn_scale) { const int64_t n_tokens = ubatch->n_tokens; std::vector attn_scale_data(n_tokens, 0.0f); for (int i = 0; i < n_tokens; ++i) { const float pos = ubatch->pos[i]; attn_scale_data[i] = std::log( std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0 ) * f_attn_temp_scale + 1.0; } ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*n_pos_per_token*ggml_element_size(attn_scale)); } } void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { if (pos_bucket) { const int64_t n_tokens = ubatch->n_tokens; GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing int32_t * data = (int32_t *) pos_bucket->data; for (int h = 0; h < 1; ++h) { for (int j = 0; j < n_tokens; ++j) { for (int i = 0; i < n_tokens; ++i) { data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true); } } } } } void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { if (pos_bucket) { const int64_t n_tokens = ubatch->n_tokens; GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing int32_t * data = (int32_t *) pos_bucket->data; const int64_t n_kv = kv_self->n; for (int h = 0; h < 1; ++h) { for (int j = 0; j < n_tokens; ++j) { for (int i = 0; i < n_kv; ++i) { data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(kv_self->cells[i].pos, ubatch->pos[j], hparams.n_rel_attn_bkts, false); } } } } } void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { if (hparams.causal_attn || cparams.pooling_type 
void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) {
    if (ubatch->token) {
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens));
    }

    if (ubatch->embd) {
        const int64_t n_embd   = embd->ne[0];
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd));
    }
}

void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && pos) {
        const int64_t n_tokens = ubatch->n_tokens;

        ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_token*ggml_element_size(pos));
    }
}

void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) {
    if (ubatch->pos && attn_scale) {
        const int64_t n_tokens = ubatch->n_tokens;

        std::vector<float> attn_scale_data(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const float pos = ubatch->pos[i];
            attn_scale_data[i] = std::log(
                std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0
            ) * f_attn_temp_scale + 1.0;
        }

        ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*n_pos_per_token*ggml_element_size(attn_scale));
    }
}

void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) {
    if (pos_bucket) {
        const int64_t n_tokens = ubatch->n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
        GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing

        int32_t * data = (int32_t *) pos_bucket->data;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                for (int i = 0; i < n_tokens; ++i) {
                    data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true);
                }
            }
        }
    }
}

void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
    if (pos_bucket) {
        const int64_t n_tokens = ubatch->n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
        GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing

        int32_t * data = (int32_t *) pos_bucket->data;

        const int64_t n_kv = kv_self->n;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                for (int i = 0; i < n_kv; ++i) {
                    data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(kv_self->cells[i].pos, ubatch->pos[j], hparams.n_rel_attn_bkts, false);
                }
            }
        }
    }
}

void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
    if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
        //GGML_ASSERT(out_ids && "every model that can must skip unused outputs");

        if (!out_ids) {
            LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__);
        } else {
            const int64_t n_tokens = ubatch->n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
            int32_t * data = (int32_t *) out_ids->data;

            if (n_outputs == n_tokens) {
                for (int i = 0; i < n_tokens; ++i) {
                    data[i] = i;
                }
            } else if (ubatch->output) {
                int32_t n_out = 0;
                for (int i = 0; i < n_tokens; ++i) {
                    if (ubatch->output[i]) {
                        data[n_out++] = i;
                    }
                }
                // the graph needs to have been passed the correct number of outputs
                GGML_ASSERT(n_outputs == n_out);
            } else if (n_outputs == 1) {
                // only keep last output
                data[0] = n_tokens - 1;
            } else {
                GGML_ASSERT(n_outputs == 0);
            }
        }
    }
}

void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        GGML_ASSERT(mean);
        GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));

        float * data = (float *) mean->data;
        memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean));

        std::vector<uint64_t> sum(n_tokens, 0);

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");

            sum[seq_id] += ubatch->n_seq_tokens;
        }

        std::vector<float> div(n_tokens, 0.0f);
        for (int i = 0; i < n_tokens; ++i) {
            const uint64_t s = sum[i];
            if (s > 0) {
                div[i] = 1.0f/float(s);
            }
        }

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            for (int i = 0; i < n_seq_tokens; ++i) {
                data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id];
            }
        }
    }
}
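// Illustrative example (not part of the build): with two sequences in the
// ubatch, seq 0 covering tokens 0..2 and seq 1 covering tokens 3..4, the
// [n_tokens, n_tokens] mean tensor holds, in memory, 1/len at each
// sequence's token positions within row seq_id:
//   row 0: 1/3 1/3 1/3  0   0
//   row 1:  0   0   0  1/2 1/2
// so the ggml_mul_mat(embd^T, inp_mean) in build_pooling() produces one
// averaged embedding per sequence.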
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
    if (cparams.embeddings && (
                cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
                cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_tokens * ggml_element_size(cls));

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");

            for (int i = 0; i < n_seq_tokens; ++i) {
                const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];

                if (pos == 0) {
                    data[seq_id] = s*n_seq_tokens + i;
                }
            }
        }
    }

    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        GGML_ASSERT(cls);
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));

        uint32_t * data = (uint32_t *) cls->data;
        memset(cls->data, 0, n_tokens * ggml_element_size(cls));

        std::vector<int> last_pos(n_tokens, -1);
        std::vector<int> last_row(n_tokens, -1);

        for (int s = 0; s < n_seqs; ++s) {
            const llama_seq_id seq_id = ubatch->seq_id[s][0];

            // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true
            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");

            for (int i = 0; i < n_seq_tokens; ++i) {
                const llama_pos pos = ubatch->pos[s*n_seq_tokens + i];

                if (pos >= last_pos[seq_id]) {
                    last_pos[seq_id] = pos;
                    last_row[seq_id] = s*n_seq_tokens + i;
                }
            }
        }

        for (int i = 0; i < n_tokens; ++i) {
            if (last_row[i] >= 0) {
                data[i] = last_row[i];
            }
        }
    }
}

void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    const int64_t n_kv = kv_self->n;

    if (s_copy) {
        GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
        int32_t * data = (int32_t *) s_copy->data;

        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
        for (uint32_t i = 0; i < n_kv; ++i) {
            const uint32_t cell_id = i + kv_self->head;

            //////////////////////////////////////////////
            // TODO: this should not mutate the KV cache !
            llama_kv_cell & kv_cell = const_cast<llama_kv_cache_unified *>(kv_self)->cells[i];

            // prevent out-of-bound sources
            if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self->size) {
                kv_cell.src = cell_id;
            }

            data[i] = kv_cell.src;

            // TODO: do not mutate the KV cache
            // ensure copy only happens once
            if (kv_cell.src != (int32_t) cell_id) {
                kv_cell.src = cell_id;
            }
        }
    }
}

void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    const int64_t n_kv = kv_self->n;

    if (s_mask) {
        GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer));
        float * data = (float *) s_mask->data;

        // clear unused states
        for (int i = 0; i < n_kv; ++i) {
            const uint32_t cell_id = i + kv_self->head;

            //////////////////////////////////////////////
            // TODO: this should not mutate the KV cache !
            llama_kv_cell & kv_cell = const_cast<llama_kv_cache_unified *>(kv_self)->cells[i];

            data[i] = (float) (kv_cell.src >= 0);

            // only clear once
            if (kv_cell.src < 0) {
                kv_cell.src = cell_id;
            }
        }
    }
}
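// Illustrative sketch of the recurrent-state inputs (not part of the build):
// with head = 4 and n = 3 the graph works on state slots 4..6. s_copy holds
// one source cell id per slot, e.g. {4, 9, 6} means the middle slot gathers
// its state from cell 9 while the others keep their own; out-of-range sources
// are clamped to the slot itself and src is reset so each copy happens once.
// s_mask holds 1.0 for slots with a valid state and 0.0 for fresh sequences,
// which build_copy_mask_state() multiplies in to zero-initialize the latter.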
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
    GGML_UNUSED(ubatch);

    if (cross_embd && !cross->v_embd.empty()) {
        assert(cross_embd->type == GGML_TYPE_F32);

        ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd));
    }
}

void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
    if (kq_mask) {
        if (cparams.causal_attn) {
            const int64_t n_kv         = ubatch->n_tokens;
            const int64_t n_tokens     = ubatch->n_tokens;
            const int64_t n_seq_tokens = ubatch->n_seq_tokens;
            const int64_t n_seqs       = ubatch->n_seqs;

            GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));
            float * data = (float *) kq_mask->data;

            for (int h = 0; h < 1; ++h) {
                for (int s1 = 0; s1 < n_seqs; ++s1) {
                    const llama_seq_id seq_id = ubatch->seq_id[s1][0];

                    for (int j = 0; j < n_seq_tokens; ++j) {
                        const int32_t tj = s1*n_seq_tokens + j;

                        for (int s0 = 0; s0 < n_seqs; ++s0) {
                            for (int i = 0; i < n_seq_tokens; ++i) {
                                const int32_t ti = s0*n_seq_tokens + i;
                                float f = -INFINITY;

                                for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
                                    if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) {
                                        if (hparams.use_alibi) {
                                            f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
                                        } else {
                                            f = 0.0f;
                                        }
                                        break;
                                    }
                                }

                                data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f;
                            }
                        }
                    }
                }
            }
        } else {
            const int64_t n_tokens     = ubatch->n_tokens;
            const int64_t n_seq_tokens = ubatch->n_seq_tokens;
            const int64_t n_seqs       = ubatch->n_seqs;
            const int64_t n_stride     = ubatch->n_tokens;

            GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer));

            float * data = (float *) kq_mask->data;

            for (int h = 0; h < 1; ++h) {
                for (int s1 = 0; s1 < n_seqs; ++s1) {
                    const llama_seq_id seq_id = ubatch->seq_id[s1][0];

                    for (int j = 0; j < n_seq_tokens; ++j) {
                        const int32_t tj = s1*n_seq_tokens + j;

                        for (int s0 = 0; s0 < n_seqs; ++s0) {
                            for (int i = 0; i < n_seq_tokens; ++i) {
                                const int32_t ti = s0*n_seq_tokens + i;
                                float f = -INFINITY;

                                for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) {
                                    if (ubatch->seq_id[s0][s] == seq_id) {
                                        if (hparams.use_alibi) {
                                            f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]);
                                        } else {
                                            f = 0.0f;
                                        }
                                        break;
                                    }
                                }

                                data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
                            }
                        }

                        for (int i = n_tokens; i < n_stride; ++i) {
                            data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
                        }
                    }
                }
            }
        }
    }
}

void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) {
    if (self_kq_mask || self_kq_mask_swa) {
        const int64_t n_kv         = kv_self->n;
        const int64_t n_tokens     = ubatch->n_tokens;
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
        const int64_t n_seqs       = ubatch->n_seqs;

        float * data     = nullptr;
        float * data_swa = nullptr;

        if (self_kq_mask) {
            GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer));
            data = (float *) self_kq_mask->data;
        }

        if (self_kq_mask_swa) {
            GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask_swa->buffer));
            data_swa = (float *) self_kq_mask_swa->data;
        }

        // Use only the previous KV cells of the correct sequence for each token of the ubatch.
        // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
        // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch:
        //   Causal mask:
        //      xxx-------
        //      xxxx------
        //      xxxxx-----
        //   Non-causal mask:
        //      xxxxx-----
        //      xxxxx-----
        //      xxxxx-----
        // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
        for (int h = 0; h < 1; ++h) {
            for (int s = 0; s < n_seqs; ++s) {
                const llama_seq_id seq_id = ubatch->seq_id[s][0];

                for (int j = 0; j < n_seq_tokens; ++j) {
                    const llama_pos pos = ubatch->pos[s*n_seq_tokens + j];

                    for (int i = 0; i < n_kv; ++i) {
                        float f;
                        // mask the token if:
                        if (!kv_self->cells[i].has_seq_id(seq_id) // not the correct sequence
                            || (cparams.causal_attn && kv_self->cells[i].pos > pos) // for causal, mask future tokens
                        ) {
                            f = -INFINITY;
                        } else {
                            if (hparams.use_alibi) {
                                f = -std::abs(kv_self->cells[i].pos - pos);
                            } else {
                                f = 0.0f;
                            }
                        }

                        if (data) {
                            data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
                        }

                        // may need to cut off old tokens for sliding window
                        // TODO @ngxson : we are currently re-using the swa logic to store the chunked mask, we should rename SWA to something more generic like "aux mask"
                        if (data_swa) {
                            if (hparams.n_attn_chunk) {
                                llama_pos pos_chunk_start = (pos / hparams.n_attn_chunk) * hparams.n_attn_chunk;
                                if (kv_self->cells[i].pos < pos_chunk_start || pos < pos_chunk_start) {
                                    f = -INFINITY;
                                }
                            } else {
                                if (pos - kv_self->cells[i].pos >= (int32_t)hparams.n_swa) {
                                    f = -INFINITY;
                                }
                            }
                            data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
                        }
                    }
                }
            }

            // mask padded tokens
            if (data) {
                for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
                    for (int j = 0; j < n_kv; ++j) {
                        data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
                    }
                }
            }

            // mask padded tokens
            if (data_swa) {
                for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
                    for (int j = 0; j < n_kv; ++j) {
                        data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
                    }
                }
            }
        }
    }
}
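// Illustrative example of the auxiliary mask above (not part of the build):
// for a sliding window of hparams.n_swa = 4, a query at pos = 10 keeps cells
// with pos in (6, 10] (i.e. 10 - cell.pos < 4) and masks everything older.
// For chunked attention with hparams.n_attn_chunk = 8, the same query only
// attends within its chunk: pos_chunk_start = (10 / 8) * 8 = 8, so cells
// with pos < 8 are masked.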
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
    if (cross_kq_mask) {
        const int64_t n_enc    = cross_kq_mask->ne[0];
        const int64_t n_tokens = ubatch->n_tokens;

        GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
        GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing

        float * data = (float *) cross_kq_mask->data;

        for (int h = 0; h < 1; ++h) {
            for (int j = 0; j < n_tokens; ++j) {
                for (int i = 0; i < n_enc; ++i) {
                    float f = -INFINITY;
                    for (int s = 0; s < ubatch->n_seq_id[j]; ++s) {
                        const llama_seq_id seq_id = ubatch->seq_id[j][s];
                        if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) {
                            f = 0.0f;
                        }
                    }
                    data[h*(n_enc*n_tokens) + j*n_enc + i] = f;
                }
            }

            for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
                for (int j = 0; j < n_enc; ++j) {
                    data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
                }
            }
        }
    }
}

//
// llm_graph_context
//

llm_graph_context::llm_graph_context(const llm_graph_params & params) :
    arch          (params.arch),
    hparams       (params.hparams),
    cparams       (params.cparams),
    ubatch        (params.ubatch),
    n_embd        (hparams.n_embd),
    n_layer       (hparams.n_layer),
    n_rot         (hparams.n_rot),
    n_ctx         (cparams.n_ctx),
    n_ctx_per_seq (cparams.n_ctx / cparams.n_seq_max),
    n_head        (hparams.n_head()),
    n_head_kv     (hparams.n_head_kv()),
    n_embd_head_k (hparams.n_embd_head_k),
    n_embd_k_gqa  (hparams.n_embd_k_gqa()),
    n_embd_head_v (hparams.n_embd_head_v),
    n_embd_v_gqa  (hparams.n_embd_v_gqa()),
    n_expert      (hparams.n_expert),
    n_expert_used (cparams.warmup ? hparams.n_expert : hparams.n_expert_used),
    freq_base     (cparams.rope_freq_base),
    freq_scale    (cparams.rope_freq_scale),
    ext_factor    (cparams.yarn_ext_factor),
    attn_factor   (cparams.yarn_attn_factor),
    beta_fast     (cparams.yarn_beta_fast),
    beta_slow     (cparams.yarn_beta_slow),
    norm_eps      (hparams.f_norm_eps),
    norm_rms_eps  (hparams.f_norm_rms_eps),
    n_tokens      (ubatch.n_tokens),
    n_outputs     (params.n_outputs),
    n_ctx_orig    (cparams.n_ctx_orig_yarn),
    pooling_type  (cparams.pooling_type),
    rope_type     (hparams.rope_type),
    ctx0          (params.ctx),
    sched         (params.sched),
    backend_cpu   (params.backend_cpu),
    cvec          (params.cvec),
    loras         (params.loras),
    memory        (params.memory),
    cross         (params.cross),
    cb_func       (params.cb),
    res           (std::make_unique<llm_graph_result>()) {
}

int64_t llm_graph_context::n_pos_per_token() const {
    return arch == LLM_ARCH_QWEN2VL ? 4 : 1;
}

void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
    if (cb_func) {
        cb_func(ubatch, cur, name, il);
    }
}

ggml_tensor * llm_graph_context::build_cvec(
         ggml_tensor * cur,
                 int   il) const {
    return cvec->apply_to(ctx0, cur, il);
}

ggml_tensor * llm_graph_context::build_lora_mm(
          ggml_tensor * w,
          ggml_tensor * cur) const {
    ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);

    for (const auto & lora : *loras) {
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
        if (lw == nullptr) {
            continue;
        }

        const float adapter_scale = lora.second;
        const float scale = lw->get_scale(lora.first->alpha, adapter_scale);

        ggml_tensor * ab_cur = ggml_mul_mat(
                ctx0, lw->b,
                ggml_mul_mat(ctx0, lw->a, cur)
                );

        ab_cur = ggml_scale(ctx0, ab_cur, scale);
        res    = ggml_add(ctx0, res, ab_cur);
    }

    return res;
}
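// In effect build_lora_mm computes y = W*x + s * B*(A*x) for every active
// adapter, with s coming from get_scale(); build_lora_mm_id below uses the
// equivalent s = adapter_scale * alpha / rank (falling back to adapter_scale
// when alpha is 0). The low-rank product A*x is formed first, so the extra
// cost per adapter stays proportional to the rank rather than a full matmul.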
ggml_tensor * llm_graph_context::build_lora_mm_id(
          ggml_tensor * w,   // ggml_tensor * as
          ggml_tensor * cur, // ggml_tensor * b
          ggml_tensor * ids) const {
    ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);

    for (const auto & lora : *loras) {
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
        if (lw == nullptr) {
            continue;
        }

        const float alpha = lora.first->alpha;
        const float rank  = (float) lw->b->ne[0];
        const float scale = alpha ? lora.second * alpha / rank : lora.second;

        ggml_tensor * ab_cur = ggml_mul_mat_id(
                ctx0, lw->b,
                ggml_mul_mat_id(ctx0, lw->a, cur, ids),
                ids
                );

        ab_cur = ggml_scale(ctx0, ab_cur, scale);
        res    = ggml_add(ctx0, res, ab_cur);
    }

    return res;
}

ggml_tensor * llm_graph_context::build_norm(
         ggml_tensor * cur,
         ggml_tensor * mw,
         ggml_tensor * mb,
       llm_norm_type   type,
                 int   il) const {
    switch (type) {
        case LLM_NORM:
            cur = ggml_norm(ctx0, cur, hparams.f_norm_eps);
            break;
        case LLM_NORM_RMS:
            cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps);
            break;
        case LLM_NORM_GROUP:
            {
                cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);
                cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps);
                cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]);
            } break;
    }

    if (mw || mb) {
        cb(cur, "norm", il);
    }

    if (mw) {
        cur = ggml_mul(ctx0, cur, mw);
        if (mb) {
            cb(cur, "norm_w", il);
        }
    }

    if (mb) {
        cur = ggml_add(ctx0, cur, mb);
    }

    return cur;
}
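// For reference, the three variants differ only in the normalization
// statistics (all operate along each row, i.e. ne[0]):
//   LLM_NORM:       (x - mean(x)) / sqrt(var(x) + eps), then optional *mw, +mb
//   LLM_NORM_RMS:   x / sqrt(mean(x^2) + eps),          then optional *mw, +mb
//   LLM_NORM_GROUP: standard norm applied per group of hparams.n_norm_groups
//                   channels (hence the temporary 3d reshape)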
ggml_tensor * llm_graph_context::build_ffn(
         ggml_tensor * cur,
         ggml_tensor * up,
         ggml_tensor * up_b,
         ggml_tensor * up_s,
         ggml_tensor * gate,
         ggml_tensor * gate_b,
         ggml_tensor * gate_s,
         ggml_tensor * down,
         ggml_tensor * down_b,
         ggml_tensor * down_s,
         ggml_tensor * act_scales,
     llm_ffn_op_type   type_op,
   llm_ffn_gate_type   type_gate,
                 int   il) const {
    ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
        tmp = ggml_add(ctx0, tmp, up_b);
        cb(tmp, "ffn_up_b", il);
    }

    if (up_s) {
        tmp = ggml_mul(ctx0, tmp, up_s);
        cb(tmp, "ffn_up_s", il);
    }

    if (gate) {
        switch (type_gate) {
            case LLM_FFN_SEQ:
                {
                    cur = build_lora_mm(gate, tmp);
                    cb(cur, "ffn_gate", il);
                } break;
            case LLM_FFN_PAR:
                {
                    cur = build_lora_mm(gate, cur);
                    cb(cur, "ffn_gate", il);
                } break;
        }

        if (gate_b) {
            cur = ggml_add(ctx0, cur, gate_b);
            cb(cur, "ffn_gate_b", il);
        }

        if (gate_s) {
            cur = ggml_mul(ctx0, cur, gate_s);
            cb(cur, "ffn_gate_s", il);
        }
    } else {
        cur = tmp;
    }

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                cur = ggml_silu(ctx0, cur);
                cb(cur, "ffn_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                cur = ggml_gelu(ctx0, cur);
                cb(cur, "ffn_gelu", il);
                if (act_scales != NULL) {
                    cur = ggml_div(ctx0, cur, act_scales);
                    cb(cur, "ffn_act", il);
                }
            } break;
        case LLM_FFN_RELU:
            {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_relu", il);
            } break;
        case LLM_FFN_RELU_SQR:
            {
                cur = ggml_relu(ctx0, cur);
                cb(cur, "ffn_relu", il);

                cur = ggml_sqr(ctx0, cur);
                cb(cur, "ffn_sqr(relu)", il);
            } break;
        case LLM_FFN_SWIGLU:
            {
                // Project to 4h. If using SwiGLU, double the output width - see https://arxiv.org/pdf/2002.05202.pdf
                int64_t split_point = cur->ne[0] / 2;
                ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0));
                ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur)));

                x0 = ggml_silu(ctx0, x0);
                cb(x0, "ffn_silu", il);

                cur = ggml_mul(ctx0, x0, x1);
                cb(cur, "ffn_mul", il);
            } break;
    }

    if (type_gate == LLM_FFN_PAR) {
        cur = ggml_mul(ctx0, cur, tmp);
        cb(cur, "ffn_gate_par", il);
    }

    if (down) {
        cur = build_lora_mm(down, cur);
    }

    if (down_b) {
        cb(cur, "ffn_down", il);
        cur = ggml_add(ctx0, cur, down_b);
    }

    if (down_s) {
        cur = ggml_mul(ctx0, cur, down_s);
        cb(cur, "ffn_down_s", il);
    }

    return cur;
}
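// Illustrative shape walkthrough for LLM_FFN_SWIGLU (not part of the build),
// using hypothetical llama-7B-like sizes: with n_embd = 4096 and n_ff = 11008,
// `up` projects to [2*n_ff, n_tokens]; the two ggml_view_2d calls split this
// into x0 and x1 of [n_ff, n_tokens] each, and silu(x0) * x1 is projected
// back to [n_embd, n_tokens] by `down`. This is the GLU-variant formulation
// of https://arxiv.org/pdf/2002.05202.pdf with both branches fused into a
// single up projection.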
ggml_tensor * llm_graph_context::build_moe_ffn(
         ggml_tensor * cur,
         ggml_tensor * gate_inp,
         ggml_tensor * up_exps,
         ggml_tensor * gate_exps,
         ggml_tensor * down_exps,
         ggml_tensor * exp_probs_b,
             int64_t   n_expert,
             int64_t   n_expert_used,
     llm_ffn_op_type   type_op,
                bool   norm_w,
                bool   scale_w,
               float   w_scale,
 llama_expert_gating_func_type gating_op,
                 int   il) const {
    const int64_t n_embd   = cur->ne[0];
    const int64_t n_tokens = cur->ne[1];
    const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN

    ggml_tensor * logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens]
    cb(logits, "ffn_moe_logits", il);

    ggml_tensor * probs = nullptr;
    switch (gating_op) {
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
            {
                probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens]
            } break;
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
            {
                probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
            } break;
        default:
            GGML_ABORT("fatal error");
    }
    cb(probs, "ffn_moe_probs", il);

    // add experts selection bias - introduced in DeepSeek V3
    // leave probs unbiased as it's later used to get expert weights
    ggml_tensor * selection_probs = probs;
    if (exp_probs_b != nullptr) {
        selection_probs = ggml_add(ctx0, probs, exp_probs_b);
        cb(selection_probs, "ffn_moe_probs_biased", il);
    }

    // llama4 doesn't have exp_probs_b, and sigmoid is only used after top_k
    // see: https://github.com/meta-llama/llama-models/blob/699a02993512fb36936b1b0741e13c06790bcf98/models/llama4/moe.py#L183-L198
    if (arch == LLM_ARCH_LLAMA4) {
        selection_probs = logits;
    }

    // select experts
    ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
    cb(selected_experts->src[0], "ffn_moe_argsort", il);
    cb(selected_experts, "ffn_moe_topk", il);

    ggml_tensor * weights = ggml_get_rows(ctx0,
            ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens]
    cb(weights, "ffn_moe_weights", il);

    if (norm_w) {
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);

        ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
        cb(weights_sum, "ffn_moe_weights_sum", il);

        weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
        cb(weights, "ffn_moe_weights_norm", il);

        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
    }
    if (scale_w) {
        weights = ggml_scale(ctx0, weights, w_scale);
        cb(weights, "ffn_moe_weights_scaled", il);
    }

    cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
    if (weight_before_ffn) {
        // TODO: this is a workaround as we don't yet have a repeat op that takes custom dim (ggml_repeat_4d)
        ggml_tensor * repeated = ggml_new_tensor_3d(ctx0, cur->type, n_embd, n_expert_used, n_tokens);
        repeated = ggml_repeat(ctx0, cur, repeated); // [n_embd, n_expert_used, n_tokens]
        cur = ggml_mul(ctx0, repeated, weights);
        cb(cur, "ffn_moe_weighted", il);
    }

    ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
    cb(up, "ffn_moe_up", il);

    ggml_tensor * gate = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
    cb(gate, "ffn_moe_gate", il);

    switch (type_op) {
        case LLM_FFN_SILU:
            {
                gate = ggml_silu(ctx0, gate);
                cb(gate, "ffn_moe_silu", il);
            } break;
        case LLM_FFN_GELU:
            {
                gate = ggml_gelu(ctx0, gate);
                cb(gate, "ffn_moe_gelu", il);
            } break;
        default:
            GGML_ABORT("fatal error");
    }

    ggml_tensor * par = ggml_mul(ctx0, up, gate); // [n_ff, n_expert_used, n_tokens]
    cb(par, "ffn_moe_gate_par", il);

    ggml_tensor * experts = build_lora_mm_id(down_exps, par, selected_experts); // [n_embd, n_expert_used, n_tokens]
    cb(experts, "ffn_moe_down", il);

    if (!weight_before_ffn) {
        experts = ggml_mul(ctx0, experts, weights);
        cb(experts, "ffn_moe_weighted", il);
    }

    // aggregate experts
    ggml_tensor * moe_out = nullptr;
    for (int i = 0; i < n_expert_used; ++i) {
        ggml_tensor * cur_expert = ggml_view_2d(ctx0, experts, n_embd, n_tokens,
                experts->nb[2], i*experts->nb[1]);

        if (i == 0) {
            moe_out = cur_expert;
        } else {
            moe_out = ggml_add(ctx0, moe_out, cur_expert);
        }
    }

    if (n_expert_used == 1) {
        // avoid returning a non-contiguous tensor
        moe_out = ggml_cont(ctx0, moe_out);
    }

    cb(moe_out, "ffn_moe_out", il);

    return moe_out;
}
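// Illustrative shape walkthrough of build_moe_ffn (not part of the build),
// e.g. for n_expert = 8, n_expert_used = 2:
//   logits/probs          [n_expert, n_tokens]
//   selected_experts      [n_expert_used, n_tokens]   (ids from ggml_top_k)
//   weights               [1, n_expert_used, n_tokens]
//   up/gate (mul_mat_id)  [n_ff, n_expert_used, n_tokens]
//   experts (down)        [n_embd, n_expert_used, n_tokens]
// and the aggregation loop sums the n_expert_used expert outputs into
// moe_out [n_embd, n_tokens].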
// input embeddings with optional lora
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
    const int64_t n_embd = hparams.n_embd;

    auto inp = std::make_unique<llm_graph_input_embd>();

    ggml_tensor * cur = nullptr;

    if (ubatch.token) {
        inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
        //cb(inp->tokens, "inp_tokens", -1);
        ggml_set_input(inp->tokens);

        cur = ggml_get_rows(ctx0, tok_embd, inp->tokens);

        // apply lora for embedding tokens if needed
        for (const auto & lora : *loras) {
            llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
            if (lw == nullptr) {
                continue;
            }

            const float adapter_scale = lora.second;
            const float scale = lw->get_scale(lora.first->alpha, adapter_scale);

            ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
                        ctx0, lw->b, // non-transposed lora_b
                        ggml_get_rows(ctx0, lw->a, inp->tokens)
                        ), scale);

            cur = ggml_add(ctx0, cur, inpL_delta);
        }
    } else {
        inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
        ggml_set_input(inp->embd);

        cur = inp->embd;
    }

    // For Granite architecture
    if (hparams.f_embedding_scale != 0.0f) {
        cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale);
    }

    cb(cur, "inp_embd", -1);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos() const {
    auto inp = std::make_unique<llm_graph_input_pos>(n_pos_per_token());

    auto & cur = inp->pos;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_token());
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
    auto inp = std::make_unique<llm_graph_input_attn_temp>(n_pos_per_token(), hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale);

    auto & cur = inp->attn_scale;

    cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens*n_pos_per_token());
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_out_ids() const {
    auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);

    auto & cur = inp->out_ids;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_mean() const {
    auto inp = std::make_unique<llm_graph_input_mean>(cparams);

    auto & cur = inp->mean;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_cls() const {
    auto inp = std::make_unique<llm_graph_input_cls>(cparams);

    auto & cur = inp->cls;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_s_copy() const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    auto inp = std::make_unique<llm_graph_input_s_copy>(kv_self);

    const auto n_kv = kv_self->n;

    auto & cur = inp->s_copy;

    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_s_mask() const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    auto inp = std::make_unique<llm_graph_input_s_mask>(kv_self);

    const auto n_kv = kv_self->n;

    auto & cur = inp->s_mask;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
    auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);

    auto & cur = inp->cross_embd;

    // if we have the output embeddings from the encoder, use them directly
    // TODO: needs more work to be correct, for now just use the tensor shape
    //if (cross->t_embd) {
    //    cur = ggml_view_tensor(ctx0, cross->t_embd);

    //    return cur;
    //}

    const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd;
    const auto n_enc  = !cross->v_embd.empty() ? cross->n_enc  : hparams.n_ctx_train;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const {
    auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams);

    auto & cur = inp->pos_bucket;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, kv_self);

    const auto n_kv = kv_self->n;

    auto & cur = inp->pos_bucket;

    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
    ggml_set_input(cur);

    res->add_input(std::move(inp));

    return cur;
}

ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const {
    ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]);
    cb(pos_bucket_1d, "pos_bucket_1d", -1);

    ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);

    pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]);
    pos_bias = ggml_permute   (ctx0, pos_bias, 2, 0, 1, 3);
    pos_bias = ggml_cont      (ctx0, pos_bias);

    cb(pos_bias, "pos_bias", -1);

    return pos_bias;
}
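// Illustrative shape walkthrough of build_pos_bias (not part of the build):
// pos_bucket is [n_kv, n_tokens] of bucket ids; ggml_get_rows looks each id
// up in attn_rel_b, which holds one per-head bias row per bucket, giving
// [n_head, n_kv * n_tokens]. The reshape and permute then rearrange this to
// [n_kv, n_tokens, n_head], the kq_b layout that build_attn_mha expects.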
ggml_tensor * llm_graph_context::build_attn_mha(
         ggml_cgraph * gf,
         ggml_tensor * q,
         ggml_tensor * k,
         ggml_tensor * v,
         ggml_tensor * kq_b,
         ggml_tensor * kq_mask,
         ggml_tensor * v_mla,
                bool   v_trans,
               float   kq_scale) const {
    //const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
    //const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);

    //const int64_t n_head    = hparams.n_head(il);
    //const int64_t n_head_kv = hparams.n_head_kv(il);

    //const auto & n_embd_head_k = hparams.n_embd_head_k;
    //const auto & n_embd_head_v = hparams.n_embd_head_v;

    const auto n_tokens = q->ne[1];
    const auto n_head   = q->ne[2];
    const auto n_kv     = k->ne[1];

    ggml_tensor * cur;

    // TODO: replace hardcoded padding with ggml-provided padding
    if (cparams.flash_attn && (n_kv % 256 == 0) && kq_b == nullptr) {
        GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet");

        if (v_trans) {
            v = ggml_transpose(ctx0, v);
        }

        // this can happen when KV cache is not used (e.g. an embedding model with non-causal attn)
        if (k->type == GGML_TYPE_F32) {
            k = ggml_cast(ctx0, k, GGML_TYPE_F16);
        }

        if (v->type == GGML_TYPE_F32) {
            v = ggml_cast(ctx0, v, GGML_TYPE_F16);
        }

        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
                hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);

        ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);

        if (v_mla) {
            cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens);
            cur = ggml_mul_mat(ctx0, v_mla, cur);
        }

        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);
    } else {
        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);

        // note: this op tends to require high floating point range
        //       while for some models F16 is enough, for others it is not, so we default to F32 here
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

        if (arch == LLM_ARCH_GROK) {
            // need to do the following:
            // multiply by attn_output_multiplier of 0.08838834764831845
            // and then:
            // kq = 30 * tanh(kq / 30)
            // before the softmax below
            kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, 0.08838834764831845f/30.0f));
            kq = ggml_scale(ctx0, kq, 30);
        }

        if (hparams.attn_soft_cap) {
            kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping);
            kq = ggml_tanh (ctx0, kq);
            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
        }

        if (kq_b) {
            kq = ggml_add(ctx0, kq, kq_b);
        }

        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);

        if (!v_trans) {
            // note: avoid this branch
            v = ggml_cont(ctx0, ggml_transpose(ctx0, v));
        }

        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);

        // for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA
        if (v_mla) {
            kqv = ggml_mul_mat(ctx0, v_mla, kqv);
        }

        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);

        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens);

        if (!cparams.offload_kqv) {
            // all nodes between the KV store and the attention output are run on the CPU
            ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
        }
    }

    ggml_build_forward_expand(gf, cur);

    return cur;
}
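// For reference, both branches above compute the same attention up to
// precision:
//   out = softmax(kq_scale * K^T Q + kq_mask (+ kq_b)) V
// The flash-attention path fuses everything into one node (F16 K/V with F32
// accumulation); the fallback materializes kq explicitly so the soft-cap and
// the T5-style kq_b bias can be applied before ggml_soft_max_ext, which is
// also why the flash path rejects kq_b (see the assert).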
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const {
    auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams);

    // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch
    inp->kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
    //cb(inp_kq_mask, "KQ_mask", -1);
    ggml_set_input(inp->kq_mask);

    inp->kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask;

    return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp));
}

ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_no_cache * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    GGML_UNUSED(n_tokens);

    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto & kq_mask = inp->get_kq_mask();

    ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
    //cb(q, "q", il);

    ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
    //cb(k, "k", il);

    ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3);
    //cb(v, "v", il);

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, false, kq_scale);

    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
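// Note on layouts (illustrative): q_cur/k_cur/v_cur typically arrive as
// [n_embd_head, n_head, n_tokens]; the permutes above swap the head and
// token dims to [n_embd_head, n_tokens, n_head], matching build_attn_mha's
// reads of q->ne[1] (tokens) and q->ne[2] (heads) and letting it batch one
// matmul per head.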
llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, kv_self);

    const auto n_kv = kv_self->n;

    inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
    //cb(inp->self_kq_mask, "KQ_mask", -1);
    ggml_set_input(inp->self_kq_mask);

    inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;

    if (hparams.n_swa_pattern > 1) {
        GGML_ASSERT(hparams.n_swa > 0);

        inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
        //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1);
        ggml_set_input(inp->self_kq_mask_swa);

        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
    }

    return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp));
}

ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_kv_unified * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);
    const auto & n_ctx = cparams.n_ctx;

    const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
    const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);

    const auto n_tokens = q_cur->ne[2];

    const bool v_trans = !cparams.flash_attn;

    // store to KV cache
    {
        GGML_ASSERT(!kv_self->recurrent);

        const auto kv_head = kv_self->head;

        GGML_ASSERT(kv_self->size == n_ctx);

        ggml_tensor * k_cache_view = ggml_view_1d(ctx0, kv_self->k_l[il], n_tokens*n_embd_k_gqa, ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa)*kv_head);
        //cb(k_cache_view, "k_cache_view", il);

        // note: storing RoPE-ed version of K in the KV cache
        ggml_build_forward_expand(gf, ggml_cpy(ctx0, k_cur, k_cache_view));

        v_cur = ggml_reshape_2d(ctx0, v_cur, n_embd_v_gqa, n_tokens);

        ggml_tensor * v_cache_view = nullptr;

        if (!v_trans) {
            v_cache_view = ggml_view_1d(ctx0, kv_self->v_l[il], n_tokens*n_embd_v_gqa, ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa)*kv_head);
        } else {
            // note: the V cache is transposed when not using flash attention
            v_cache_view = ggml_view_2d(ctx0, kv_self->v_l[il], n_tokens, n_embd_v_gqa,
                    (  n_ctx)*ggml_element_size(kv_self->v_l[il]),
                    (kv_head)*ggml_element_size(kv_self->v_l[il]));

            v_cur = ggml_transpose(ctx0, v_cur);
        }
        //cb(v_cache_view, "v_cache_view", il);

        ggml_build_forward_expand(gf, ggml_cpy(ctx0, v_cur, v_cache_view));
    }

    const bool is_swa = hparams.is_swa(il);

    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();

    const auto n_kv = kv_self->n;

    const int64_t n_head_kv = hparams.n_head_kv(il);

    const auto & n_embd_head_k = hparams.n_embd_head_k;
    const auto & n_embd_head_v = hparams.n_embd_head_v;

    ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
    //cb(q, "q", il);

    ggml_tensor * k =
        ggml_view_3d(ctx0, kv_self->k_l[il],
                n_embd_head_k, n_kv, n_head_kv,
                ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa),
                ggml_row_size(kv_self->k_l[il]->type, n_embd_head_k),
                0);
    //cb(k, "k", il);

    ggml_tensor * v = !v_trans ?
        ggml_view_3d(ctx0, kv_self->v_l[il],
                n_embd_head_v, n_kv, n_head_kv,
                ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa),
                ggml_row_size(kv_self->v_l[il]->type, n_embd_head_v),
                0) :
        ggml_view_3d(ctx0, kv_self->v_l[il],
                n_kv, n_embd_head_v, n_head_kv,
                ggml_element_size(kv_self->v_l[il])*n_ctx,
                ggml_element_size(kv_self->v_l[il])*n_ctx*n_embd_head_v,
                0);

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, v_trans, kq_scale);
    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
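// Illustrative note on the cache layout above (not part of the build):
// k_l[il] is stored row-major as [n_embd_k_gqa, n_ctx] (one contiguous K row
// per cell), while without flash attention v_l[il] is kept transposed as
// [n_ctx, n_embd_v_gqa]. The transposed layout lets the
// kqv = V * softmax(...) matmul in build_attn_mha read V rows contiguously;
// the flash-attention path keeps V untransposed and passes v_trans = false.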
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const {
    auto inp = std::make_unique<llm_graph_input_attn_cross>(cross);

    const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;

    inp->cross_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
    ggml_set_input(inp->cross_kq_mask);

    inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask;

    return (llm_graph_input_attn_cross *) res->add_input(std::move(inp));
}

ggml_tensor * llm_graph_context::build_attn(
        llm_graph_input_attn_cross * inp,
        ggml_cgraph * gf,
        ggml_tensor * wo,
        ggml_tensor * wo_b,
        ggml_tensor * q_cur,
        ggml_tensor * k_cur,
        ggml_tensor * v_cur,
        ggml_tensor * kq_b,
        ggml_tensor * v_mla,
              float   kq_scale,
                int   il) const {
    // these nodes are added to the graph together so that they are not reordered
    // by doing so, the number of splits in the graph is reduced
    ggml_build_forward_expand(gf, q_cur);
    ggml_build_forward_expand(gf, k_cur);
    ggml_build_forward_expand(gf, v_cur);

    const auto & kq_mask = inp->get_kq_mask_cross();

    ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3);
    //cb(q, "q", il);

    ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3);
    //cb(k, "k", il);

    ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3);
    //cb(v, "v", il);

    ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, false, kq_scale);

    cb(cur, "kqv_out", il);

    if (wo) {
        cur = build_lora_mm(wo, cur);
    }

    if (wo_b) {
        //cb(cur, "kqv_wo", il);
        cur = ggml_add(ctx0, cur, wo_b);
    }

    return cur;
}
ggml_tensor * llm_graph_context::build_copy_mask_state(
         ggml_cgraph * gf,
         ggml_tensor * s,
         ggml_tensor * state_copy,
         ggml_tensor * state_mask,
             int32_t   n_state,
             int32_t   n_seqs) const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    const auto n_kv    = kv_self->n;
    const auto kv_head = kv_self->head;

    ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_self->size);

    // copy states
    // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv
    // this shrinks the tensor's ne[1] to n_kv
    states = ggml_get_rows(ctx0, states, state_copy);

    // clear states of sequences which are starting at the beginning of this batch
    // FIXME: zero-out NANs?
    states = ggml_mul(ctx0, states, state_mask);

    // copy states which won't be changed further (between n_seqs and n_kv)
    ggml_build_forward_expand(gf,
        ggml_cpy(ctx0,
            ggml_view_1d(ctx0, states, n_state*(n_kv - n_seqs), (n_seqs          )*n_state*ggml_element_size(states)),
            ggml_view_1d(ctx0, s,      n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s))));

    // the part of the states that will be used and modified
    return ggml_view_2d(ctx0, states, n_state, n_seqs, states->nb[1], 0);
}

ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
         ggml_cgraph * gf,
         ggml_tensor * state_copy,
         ggml_tensor * state_mask,
  const llama_ubatch & ubatch,
                 int   il) const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    const auto token_shift_count = hparams.token_shift_count;

    const int64_t n_seqs = ubatch.n_seqs;

    ggml_tensor * token_shift_all = kv_self->k_l[il];

    ggml_tensor * token_shift = build_copy_mask_state(
            gf, token_shift_all, state_copy, state_mask,
            hparams.n_embd_k_s(), n_seqs);

    token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);

    return token_shift;
}

ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
         ggml_tensor * token_shift,
  const llama_ubatch & ubatch,
                 int   il) const {
    const llama_kv_cache_unified * kv_self = static_cast<const llama_kv_cache_unified *>(memory);

    const auto token_shift_count = hparams.token_shift_count;
    const auto n_embd = hparams.n_embd;

    const int64_t n_seqs = ubatch.n_seqs;

    const auto kv_head = kv_self->head;

    return ggml_cpy(
        ctx0,
        ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
        ggml_view_1d(ctx0, kv_self->k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self->k_l[il]))
    );
}
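// Illustrative sketch (not part of the build): for RWKV-style models each
// cache cell in k_l[il] stores hparams.n_embd_k_s() floats of recurrent
// state (n_embd * token_shift_count for the token-shift part).
// build_copy_mask_state gathers and masks the states of the n_seqs active
// sequences into the first n_seqs rows, the layers update those rows, and
// build_rwkv_token_shift_store copies them back starting at cell kv_head.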
"result_embd_pooled", -1); res->t_embd_pooled = cur; ggml_build_forward_expand(gf, cur); }