
Commit c920f00

Add compatibility with ggml-org#801

1 parent: 87c518b

File tree: 2 files changed, +10 −3 lines

examples/common.cpp (+2 −1)

@@ -145,6 +145,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.lora_adapter = argv[i];
+            params.use_mmap = false;
         } else if (arg == "-i" || arg == "--interactive") {
             params.interactive = true;
         } else if (arg == "--embedding") {
@@ -248,7 +249,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     }
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
-    fprintf(stderr, "  --lora FNAME          apply LoRA adapter\n");
+    fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
     fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
     fprintf(stderr, "\n");
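Note on the new params.use_mmap = false; line: applying a LoRA adapter rewrites the model's weight tensors in place, which presumably does not mix with a read-only memory-mapped model file, so selecting --lora also turns mmap off. A minimal standalone sketch of the same flag interaction (the gpt_params_sketch struct and parse_args_sketch helper are hypothetical stand-ins, not the project's code):

    #include <cstring>
    #include <string>

    // hypothetical stand-in for gpt_params, for illustration only
    struct gpt_params_sketch {
        std::string lora_adapter;     // path to the LoRA file, empty if unused
        bool        use_mmap = true;  // mmap is the default loading path
    };

    // sketch: --lora stores the adapter path and also disables mmap
    static void parse_args_sketch(int argc, char ** argv, gpt_params_sketch & params) {
        for (int i = 1; i < argc; i++) {
            if (std::strcmp(argv[i], "--lora") == 0 && i + 1 < argc) {
                params.lora_adapter = argv[++i];
                params.use_mmap     = false;   // --lora implies --no-mmap
            } else if (std::strcmp(argv[i], "--no-mmap") == 0) {
                params.use_mmap = false;
            }
        }
    }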

llama.cpp (+8 −2)

@@ -1808,6 +1808,12 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
     ggml_context* lora_ctx = ggml_init(params);
     std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;

+    // create a name -> tensor map of the model to accelerate lookups
+    std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
+    for (auto & kv: model.tensors_by_name) {
+        model_tensors.insert(kv);
+    }
+
     fprintf(stderr, "%s: ", __func__);

     // read tensors and apply
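The map above exists because, as the diff suggests, ggml-org#801 exposes the model's tensors as a tensors_by_name list of (name, tensor) pairs rather than a lookup table, so the old model.tensors queries further down would no longer compile. Building an unordered_map once turns each of the many per-adapter-tensor lookups into an average O(1) hash probe instead of a linear scan over the list. A self-contained sketch of the idea (the ggml_tensor_stub type and the tensor names are invented for illustration):

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    struct ggml_tensor_stub { int id; };  // stand-in for ggml_tensor

    int main() {
        static ggml_tensor_stub t0{0}, t1{1};

        // assumed shape of model.tensors_by_name: ordered pairs, O(n) to search
        std::vector<std::pair<std::string, ggml_tensor_stub *>> tensors_by_name = {
            {"layers.0.attention.wq.weight", &t0},
            {"layers.0.attention.wk.weight", &t1},
        };

        // one pass builds the map; every later find() is O(1) on average
        std::unordered_map<std::string, ggml_tensor_stub *> model_tensors;
        for (auto & kv : tensors_by_name) {
            model_tensors.insert(kv);
        }

        const std::string base_name = "layers.0.attention.wq.weight";
        if (model_tensors.find(base_name) == model_tensors.end()) {
            std::fprintf(stderr, "unknown tensor '%s'\n", base_name.c_str());
            return 1;
        }
        std::printf("found tensor '%s' (id %d)\n", base_name.c_str(), model_tensors[base_name]->id);
        return 0;
    }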
@@ -1847,7 +1853,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
         base_name.erase(pos);
         // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());

-        if (model.tensors.find(base_name.data()) == model.tensors.end()) {
+        if (model_tensors.find(base_name.data()) == model_tensors.end()) {
             fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
             return 1;
         }
@@ -1886,7 +1892,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
         if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
             lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {

-            ggml_tensor * tensor = model.tensors[base_name];
+            ggml_tensor * tensor = model_tensors[base_name];
             ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
             ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];

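For context, the loraA/loraB pair fetched here holds the low-rank factors of the adapter, and the surrounding function (outside this diff) presumably builds a ggml graph that adds their product back into the base weight. In plain scalar C++, the update it computes is roughly W += scale * (B x A), where scale typically folds in the adapter's alpha / r. A hedged sketch of that math, not the actual ggml-graph code:

    #include <vector>

    // LoRA update sketch: W (m x n) += scale * B (m x r) * A (r x n),
    // all matrices row-major
    static void apply_lora_sketch(std::vector<float> & W,
                                  const std::vector<float> & B,
                                  const std::vector<float> & A,
                                  int m, int n, int r, float scale) {
        for (int i = 0; i < m; i++) {
            for (int j = 0; j < n; j++) {
                float delta = 0.0f;
                for (int k = 0; k < r; k++) {
                    delta += B[i*r + k] * A[k*n + j];
                }
                W[i*n + j] += scale * delta;
            }
        }
    }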