Commit 074bd14

update

1 parent 3a54c1f

1 file changed: llama.cpp (+4 -4)
@@ -2683,8 +2683,8 @@ static void llm_load_tensors(
 
                 // output
                 {
-                    ggml_backend backend_norm;
-                    ggml_backend backend_output;
+                    ggml_backend_type backend_norm;
+                    ggml_backend_type backend_output;
 
                     if (n_gpu_layers > int(n_layer)) {
                         // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
@@ -2719,8 +2719,8 @@ static void llm_load_tensors(
                 model.layers.resize(n_layer);
 
                 for (uint32_t i = 0; i < n_layer; ++i) {
-                    const ggml_backend backend       = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
-                    const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+                    const ggml_backend_type backend       = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+                    const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
 
                     auto & layer = model.layers[i];
 
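For context: the commit changes the declared type of the backend variables in llm_load_tensors from ggml_backend to ggml_backend_type, presumably freeing the name ggml_backend for the ggml-backend interface object of the same name. The sketch below shows how such an enum and the LLAMA_BACKEND_OFFLOAD* macros used in the hunks plausibly fit together; the exact enum values and the GGML_USE_CUBLAS guard are assumptions modeled on ggml headers of this period, not taken from this diff.

// A minimal sketch, assuming ggml.h definitions of this era (the enum values
// and the GGML_USE_CUBLAS guard are assumptions, not shown in this diff).
// ggml_backend_type records where a tensor's data lives.
enum ggml_backend_type {
    GGML_BACKEND_CPU       = 0,  // tensor data resides in host memory
    GGML_BACKEND_GPU       = 10, // tensor data resides fully on one GPU
    GGML_BACKEND_GPU_SPLIT = 20, // tensor data is split across multiple GPUs
};

// The offload macros used above plausibly resolve to the GPU backends when GPU
// support is compiled in, and fall back to CPU otherwise (sketch, assumption):
#ifdef GGML_USE_CUBLAS
#define LLAMA_BACKEND_OFFLOAD       GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
#else
#define LLAMA_BACKEND_OFFLOAD       GGML_BACKEND_CPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
#endif

Under these assumed definitions, the expression int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD keeps the first i_gpu_start layers on the CPU and offloads the remaining ones, which is the per-layer decision the second hunk makes.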