@@ -1807,6 +1807,12 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora
     ggml_context* lora_ctx = ggml_init(params);
     std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
 
+    // create a name -> tensor map of the model to accelerate lookups
+    std::unordered_map<std::string, struct ggml_tensor *> model_tensors;
+    for (auto & kv: model.tensors_by_name) {
+        model_tensors.insert(kv);
+    }
+
     fprintf(stderr, "%s: ", __func__);
 
     // read tensors and apply
@@ -1846,7 +1852,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora
         base_name.erase(pos);
         // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
 
-        if (model.tensors.find(base_name.data()) == model.tensors.end()) {
+        if (model_tensors.find(base_name.data()) == model_tensors.end()) {
             fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
             return 1;
         }
@@ -1885,7 +1891,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora
         if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
             lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
 
-            ggml_tensor * tensor = model.tensors[base_name];
+            ggml_tensor * tensor = model_tensors[base_name];
             ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
             ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
 
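The hunks above replace repeated lookups against `model.tensors` with a single `std::unordered_map` built once from `model.tensors_by_name`, so each adapter-tensor lookup becomes an average O(1) hash probe instead of a scan. A minimal, self-contained sketch of that pattern follows; the `ggml_tensor_stub` type and the sample tensor names are hypothetical stand-ins for illustration, not part of the actual change:

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Hypothetical stand-in for ggml_tensor; the real struct lives in ggml.h.
struct ggml_tensor_stub {};

int main() {
    // Stand-in for model.tensors_by_name: a list of (name, tensor pointer)
    // pairs, as the diff assumes.
    ggml_tensor_stub a, b;
    std::vector<std::pair<std::string, ggml_tensor_stub *>> tensors_by_name = {
        {"layers.0.attention.wq.weight", &a},
        {"layers.0.attention.wv.weight", &b},
    };

    // Build the name -> tensor map once, up front.
    std::unordered_map<std::string, ggml_tensor_stub *> model_tensors;
    for (auto & kv : tensors_by_name) {
        model_tensors.insert(kv);
    }

    // Every subsequent lookup is a hash probe, mirroring the membership
    // check and indexing done in the patched loop.
    const std::string base_name = "layers.0.attention.wq.weight";
    if (model_tensors.find(base_name) == model_tensors.end()) {
        fprintf(stderr, "unknown tensor '%s'\n", base_name.c_str());
        return 1;
    }
    ggml_tensor_stub * tensor = model_tensors[base_name];
    (void) tensor;
    return 0;
}
```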