Skip to content

Commit 4d25b26

Browse files
committed
Add compatibility with ggml-org#801
1 parent cd5b2f9 commit 4d25b26

File tree

2 files changed

+10
-3
lines changed

2 files changed

+10
-3
lines changed

examples/common.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
145145
break;
146146
}
147147
params.lora_adapter = argv[i];
148+
params.use_mmap = false;
148149
} else if (arg == "-i" || arg == "--interactive") {
149150
params.interactive = true;
150151
} else if (arg == "--embedding") {
@@ -248,7 +249,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
248249
}
249250
fprintf(stderr, " --mtest compute maximum memory usage\n");
250251
fprintf(stderr, " --verbose-prompt print prompt before generation\n");
251-
fprintf(stderr, " --lora FNAME apply LoRA adapter\n");
252+
fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
252253
fprintf(stderr, " -m FNAME, --model FNAME\n");
253254
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
254255
fprintf(stderr, "\n");

llama.cpp

+8-2
Original file line numberDiff line numberDiff line change
@@ -1807,6 +1807,12 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
18071807
ggml_context* lora_ctx = ggml_init(params);
18081808
std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
18091809

1810+
// create a name -> tensor map of the model to accelerate lookups
1811+
std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
1812+
for (auto & kv: model.tensors_by_name) {
1813+
model_tensors.insert(kv);
1814+
}
1815+
18101816
fprintf(stderr, "%s: ", __func__);
18111817

18121818
// read tensors and apply
@@ -1846,7 +1852,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
18461852
base_name.erase(pos);
18471853
// fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
18481854

1849-
if (model.tensors.find(base_name.data()) == model.tensors.end()) {
1855+
if (model_tensors.find(base_name.data()) == model_tensors.end()) {
18501856
fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
18511857
return 1;
18521858
}
@@ -1885,7 +1891,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
18851891
if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
18861892
lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
18871893

1888-
ggml_tensor * tensor = model.tensors[base_name];
1894+
ggml_tensor * tensor = model_tensors[base_name];
18891895
ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
18901896
ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
18911897

0 commit comments

Comments (0)