ggml : disable CUDA graphs for non-llama.cpp projects
commit ceb77363cd
parent dc8cc2dd6f
@@ -295,7 +295,16 @@ if (GGML_CUDA)
 
         list(APPEND GGML_CDEF_PUBLIC GGML_USE_CUDA)
 
+        # TODO: for now CUDA graphs should be used only with llama.cpp
+        # https://github.com/ggerganov/whisper.cpp/issues/2258
+        message(STATUS "CMAKE_PROJECT_NAME: ${CMAKE_PROJECT_NAME}")
+        if (CMAKE_PROJECT_NAME STREQUAL "llama.cpp")
+            add_compile_definitions(GGML_CUDA_USE_GRAPHS)
+            message(STATUS "GGML_CUDA_USE_GRAPHS enabled")
+        else()
+            message(STATUS "GGML_CUDA_USE_GRAPHS disabled")
+        endif()
         add_compile_definitions(GGML_CUDA_DMMV_X=${GGML_CUDA_DMMV_X})
         add_compile_definitions(GGML_CUDA_MMV_Y=${GGML_CUDA_MMV_Y})
         add_compile_definitions(K_QUANTS_PER_ITERATION=${GGML_CUDA_KQUANTS_ITER})
 
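For context on why this guard works: CMAKE_PROJECT_NAME always holds the name given to the top-level project() call, whereas PROJECT_NAME reflects the nearest enclosing project() scope. So when ggml is built inside whisper.cpp, the top-level project is "whisper.cpp", the STREQUAL check fails, and GGML_CUDA_USE_GRAPHS stays undefined. A minimal sketch of that behavior (the file layout and project names below are illustrative, not taken from the commit):

# CMakeLists.txt (top level)
cmake_minimum_required(VERSION 3.14)
project("whisper.cpp")
add_subdirectory(ggml)

# ggml/CMakeLists.txt
project("ggml")
message(STATUS "PROJECT_NAME:       ${PROJECT_NAME}")        # prints: ggml
message(STATUS "CMAKE_PROJECT_NAME: ${CMAKE_PROJECT_NAME}")  # prints: whisper.cpp

Gating on the top-level project name disables CUDA graphs by default for every project that embeds ggml, without each downstream build having to opt out individually.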