| File | Last commit message | Last commit date |
|------|---------------------|------------------|
| acc.cu | | |
| acc.cuh | | |
| arange.cu | | |
| arange.cuh | | |
| argsort.cu | ggml : mul_mat_id use the same tensor for all the experts (llama/6387) | 2024-04-07 16:15:57 +03:00 |
| argsort.cuh | | |
| binbcast.cu | ggml : group all experts in a single ggml_mul_mat_id (llama/6505) | 2024-05-13 11:02:26 +03:00 |
| binbcast.cuh | | |
| clamp.cu | Introduction of CUDA Graphs to LLama.cpp (llama/6766) | 2024-05-13 11:02:26 +03:00 |
| clamp.cuh | | |
| common.cuh | CUDA: deduplicate FlashAttention code (llama/7352) | 2024-06-16 18:19:48 +03:00 |
| concat.cu | | |
| concat.cuh | | |
| convert.cu | Introduction of CUDA Graphs to LLama.cpp (llama/6766) | 2024-05-13 11:02:26 +03:00 |
| convert.cuh | llama : add Command R Plus support (llama/6491) | 2024-04-09 20:26:18 +03:00 |
| cpy.cu | Introduction of CUDA Graphs to LLama.cpp (llama/6766) | 2024-05-13 11:02:26 +03:00 |
| cpy.cuh | Introduction of CUDA Graphs to LLama.cpp (llama/6766) | 2024-05-13 11:02:26 +03:00 |
| dequantize.cuh | llama : add Command R Plus support (llama/6491) | 2024-04-09 20:26:18 +03:00 |
| diagmask.cu | | |
| diagmask.cuh | | |
| dmmv.cu | llama : add Command R Plus support (llama/6491) | 2024-04-09 20:26:18 +03:00 |
| dmmv.cuh | sync : llama.cpp (skip) | 2024-04-07 16:15:57 +03:00 |
| fattn-common.cuh | CUDA: deduplicate FlashAttention code (llama/7352) | 2024-06-16 18:19:48 +03:00 |
| fattn-tile-f16.cu | CUDA: deduplicate FlashAttention code (llama/7352) | 2024-06-16 18:19:48 +03:00 |
| fattn-tile-f16.cuh | CUDA: faster large batch FA without tensor cores (llama/7314) | 2024-06-16 18:19:48 +03:00 |
| fattn-tile-f32.cu | CUDA: deduplicate FlashAttention code (llama/7352) | 2024-06-16 18:19:48 +03:00 |
| fattn-tile-f32.cuh | CUDA: faster large batch FA without tensor cores (llama/7314) | 2024-06-16 18:19:48 +03:00 |
| fattn-vec-f16.cu | CUDA: add FP32 FlashAttention vector kernel (llama/7188) | 2024-05-14 19:16:29 +03:00 |
| fattn-vec-f16.cuh | CUDA: add FP32 FlashAttention vector kernel (llama/7188) | 2024-05-14 19:16:29 +03:00 |
| fattn-vec-f32.cu | CUDA: add FP32 FlashAttention vector kernel (llama/7188) | 2024-05-14 19:16:29 +03:00 |
| fattn-vec-f32.cuh | CUDA: add FP32 FlashAttention vector kernel (llama/7188) | 2024-05-14 19:16:29 +03:00 |
| fattn.cu | CUDA: deduplicate FlashAttention code (llama/7352) | 2024-06-16 18:19:48 +03:00 |
| fattn.cuh | ggml : add Flash Attention (llama/5021) | 2024-05-13 11:02:26 +03:00 |
| getrows.cu | | |
| getrows.cuh | | |
| im2col.cu | | |
| im2col.cuh | | |
| mmq.cu | CUDA: fix unused warning in mmq.cu (llama/7442) | 2024-06-16 18:19:48 +03:00 |
| mmq.cuh | | |
| mmvq.cu | cuda : fix bounds check for src0 rows in MMVQ kernel (#2231) | 2024-06-11 17:39:01 +03:00 |
| mmvq.cuh | | |
| norm.cu | | |
| norm.cuh | | |
| pad.cu | | |
| pad.cuh | | |
| pool2d.cu | | |
| pool2d.cuh | | |
| quantize.cu | llama : add Command R Plus support (llama/6491) | 2024-04-09 20:26:18 +03:00 |
| quantize.cuh | llama : add Command R Plus support (llama/6491) | 2024-04-09 20:26:18 +03:00 |
| rope.cu | | |
| rope.cuh | | |
| scale.cu | Introduction of CUDA Graphs to LLama.cpp (llama/6766) | 2024-05-13 11:02:26 +03:00 |
| scale.cuh | | |
| softmax.cu | CUDA: deduplicate FlashAttention code (llama/7352) | 2024-06-16 18:19:48 +03:00 |
| softmax.cuh | | |
| sumrows.cu | | |
| sumrows.cuh | | |
| tsembd.cu | | |
| tsembd.cuh | | |
| unary.cu | feat: implemented sigmoid function (ggml/806) | 2024-05-13 11:02:26 +03:00 |
| unary.cuh | feat: implemented sigmoid function (ggml/806) | 2024-05-13 11:02:26 +03:00 |
| upscale.cu | ggml : add ggml_upscale_ext (ggml/814) | 2024-06-16 18:19:48 +03:00 |
| upscale.cuh | | |
| vecdotq.cuh | | |