We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 05c2f76 commit 79a37ec — Copy full SHA for 79a37ec
llama.cpp
@@ -715,8 +715,12 @@ struct llama_model_loader {
715
load_data_for(lt);
716
switch(lt.ggml_tensor->backend) {
717
case GGML_BACKEND_CPU:
718
- lt.ggml_tensor->data = lt.data;
719
- break;
+ lt.ggml_tensor->data = lt.data;
+ if (use_mmap && lmlock)
720
+ {
721
+ lmlock->grow_to(done_size);
722
+ }
723
+ break;
724
#ifdef GGML_USE_CUBLAS
725
case GGML_BACKEND_CUDA:
726
ggml_cuda_load_data(lt.data, lt.ggml_tensor);
@@ -731,9 +735,6 @@ struct llama_model_loader {
731
735
continue;
732
736
}
733
737
done_size += lt.size;
734
- if (use_mmap && lmlock) {
- lmlock->grow_to(done_size);
- }
738
739
740
0 commit comments