
Commit 2796f4e

Add all missing ggml dependencies
1 parent d1cb50b

File tree: 1 file changed (CMakeLists.txt), +17 / -13 lines changed

Diff for: CMakeLists.txt (+17, -13)
@@ -6,6 +6,10 @@ option(LLAMA_BUILD "Build llama.cpp shared library and install alongside python
 option(LLAVA_BUILD "Build llava shared library and install alongside python package" ON)
 
 function(llama_cpp_python_install_target target)
+    if(NOT TARGET ${target})
+        return()
+    endif()
+
     install(
         TARGETS ${target}
         LIBRARY DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib
@@ -75,21 +79,21 @@ if (LLAMA_BUILD)
     add_subdirectory(vendor/llama.cpp)
     llama_cpp_python_install_target(llama)
     llama_cpp_python_install_target(ggml)
-    llama_cpp_python_install_target(ggml-cpu)
-    llama_cpp_python_install_target(ggml-base)
-    llama_cpp_python_install_target(ggml-amx)
-
-    if (GGML_METAL)
-        llama_cpp_python_install_target(ggml-metal)
-    endif()
 
-    if (GGML_CUDA)
-        llama_cpp_python_install_target(ggml-cuda)
-    endif()
+    llama_cpp_python_install_target(ggml-base)
 
-    if (GGML_VULKAN)
-        llama_cpp_python_install_target(ggml-vulkan)
-    endif()
+    llama_cpp_python_install_target(ggml-amx)
+    llama_cpp_python_install_target(ggml-blas)
+    llama_cpp_python_install_target(ggml-can)
+    llama_cpp_python_install_target(ggml-cpu)
+    llama_cpp_python_install_target(ggml-cuda)
+    llama_cpp_python_install_target(ggml-hip)
+    llama_cpp_python_install_target(ggml-kompute)
+    llama_cpp_python_install_target(ggml-metal)
+    llama_cpp_python_install_target(ggml-musa)
+    llama_cpp_python_install_target(ggml-rpc)
+    llama_cpp_python_install_target(ggml-sycl)
+    llama_cpp_python_install_target(ggml-vulkan)
 
     # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
     if (WIN32)
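
The key change is the guard added to llama_cpp_python_install_target: a backend that was not built (for example ggml-cuda without GGML_CUDA enabled) never exists as a CMake target, so the helper can simply return instead of failing, and every known ggml backend can then be listed unconditionally without per-backend if (GGML_*) blocks. A minimal sketch of the pattern follows; the install() body is trimmed to the LIBRARY destination visible in the diff, and the real function may pass additional destinations.

# Guarded install helper: skip targets that were never created.
function(llama_cpp_python_install_target target)
    if(NOT TARGET ${target})
        # Optional backend was not built in this configuration; nothing to install.
        return()
    endif()

    install(
        TARGETS ${target}
        LIBRARY DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib
    )
endfunction()

# With the guard in place, unconditional calls are safe: each one is a
# no-op unless the corresponding backend was actually enabled at build time.
llama_cpp_python_install_target(ggml-cuda)
llama_cpp_python_install_target(ggml-vulkan)

This keeps the build script stable as new ggml backends appear: adding one more unconditional call is enough, and configurations that do not build that backend are unaffected.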
