
Commit cb09488

slaren and hodlen authored and committed
fallback to CPU buffer if host buffer alloc fails (ggml-org#4610)
1 parent 82e5e6a commit cb09488
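
Context: previously the CUDA host buffer type allocated with CUDA_CHECK(cudaMallocHost(...)), so a failed pinned-memory allocation (seen e.g. under WSL, per the comment this commit removes) aborted the process. After this commit the failure surfaces as a null buffer that the caller can replace with a plain CPU buffer. A minimal sketch of that caller-side pattern, assuming the ggml-backend API of this period (ggml_backend_buft_alloc_buffer and ggml_backend_cpu_buffer_type from ggml-backend.h); the helper name alloc_host_or_cpu is hypothetical, not part of the commit:

    #include "ggml-backend.h"
    #include "ggml-cuda.h"

    // Try pinned (page-locked) host memory first; fall back to a plain CPU
    // buffer when the CUDA host buffer type reports allocation failure.
    static ggml_backend_buffer_t alloc_host_or_cpu(size_t size) {
        ggml_backend_buffer_t buf =
            ggml_backend_buft_alloc_buffer(ggml_backend_cuda_host_buffer_type(), size);
        if (buf == nullptr) {
            // pinned alloc failed; a pageable CPU buffer still works,
            // just with slower host<->device copies
            buf = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
        }
        return buf;
    }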

File tree: ggml-cuda.cu · llama.cpp

2 files changed: +17 -10 lines

ggml-cuda.cu (+6 -5)

@@ -6729,8 +6729,7 @@ void * ggml_cuda_host_malloc(size_t size) {
     void * ptr = nullptr;
     cudaError_t err = cudaMallocHost((void **) &ptr, size);
     if (err != cudaSuccess) {
-        // The allocation error can be bypassed. A null ptr will assigned out of this function.
-        // This can fixed the OOM error in WSL.
+        // clear the error
         cudaGetLastError();
         fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
             size/1024.0/1024.0, cudaGetErrorString(err));

@@ -9674,12 +9673,14 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) {
 // host buffer type

 static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
-    CUDA_CHECK(cudaFreeHost(buffer->context));
+    ggml_cuda_host_free(buffer->context);
 }

 static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    void * ptr;
-    CUDA_CHECK(cudaMallocHost(&ptr, size));
+    void * ptr = ggml_cuda_host_malloc(size);
+    if (ptr == nullptr) {
+        return nullptr;
+    }

     // FIXME: this is a hack to avoid having to implement a new buffer type
     ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
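
Both host-buffer paths now go through the same pair of helpers: ggml_cuda_host_malloc prints a warning, clears the CUDA error state, and returns nullptr on failure, while ggml_cuda_host_free (which replaces the direct cudaFreeHost call above) releases what it allocated. A direct caller can get the same graceful degradation by pairing pinned allocation with pageable malloc; a sketch assuming only the two functions shown in this diff, with the pinned bookkeeping being illustrative:

    #include <cstdlib>
    #include "ggml-cuda.h"

    size_t size = 512u * 1024 * 1024;       // example size

    // ggml_cuda_host_malloc returns nullptr on failure
    // (it has already printed a warning and cleared the CUDA error)
    bool   pinned = true;
    void * ptr    = ggml_cuda_host_malloc(size);
    if (ptr == nullptr) {
        pinned = false;
        ptr    = malloc(size);              // pageable fallback: slower copies, no hard abort
    }

    // ... use the buffer ...

    if (pinned) {
        ggml_cuda_host_free(ptr);
    } else {
        free(ptr);
    }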

llama.cpp (+11 -5)

@@ -1177,21 +1177,27 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_
 }

 static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) {
+    ggml_backend_buffer_type_t buft = nullptr;
+
 #ifdef GGML_USE_METAL
     if (n_gpu_layers > 0) {
-        return ggml_backend_metal_buffer_type();
+        buft = ggml_backend_metal_buffer_type();
     }
 #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
     if (n_gpu_layers > 0) {
-        return ggml_backend_cuda_buffer_type(0);
+        buft = ggml_backend_cuda_buffer_type(0);
     }
 #elif defined(GGML_USE_CUBLAS)
-    return ggml_backend_cuda_host_buffer_type();
+    buft = ggml_backend_cuda_host_buffer_type();
 #elif defined(GGML_USE_CPU_HBM)
-    return ggml_backend_cpu_hbm_buffer_type();
+    buft = ggml_backend_cpu_hbm_buffer_type();
 #endif

-    return ggml_backend_cpu_buffer_type();
+    if (buft == nullptr) {
+        buft = ggml_backend_cpu_buffer_type();
+    }
+
+    return buft;

     GGML_UNUSED(n_gpu_layers);
 }
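
The llama.cpp change is a behavior-preserving refactor in the same direction: each #ifdef branch records its choice in buft instead of returning early, and the CPU buffer type is the explicit fallback whenever no branch set one (for example, a Metal build with n_gpu_layers == 0). The single exit point also keeps the trailing GGML_UNUSED(n_gpu_layers) in place to silence the unused-parameter warning in CPU-only builds. Illustrative calls, assuming a GGML_USE_METAL build (not part of the commit):

    // n_gpu_layers == 0 -> no branch assigns buft -> CPU buffer type
    ggml_backend_buffer_type_t cpu_buft   = llama_default_buffer_type(0);

    // n_gpu_layers > 0 -> Metal buffer type
    ggml_backend_buffer_type_t metal_buft = llama_default_buffer_type(33);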
