Skip to content

Commit 0df4d69

Browse files
committed
If lora_base is not set, avoid re-loading the model by passing NULL
1 parent 95c0dc1 commit 0df4d69

File tree

1 file changed

+10
-11
lines changed

1 file changed

+10
-11
lines changed

llama_cpp/llama.py

+10-11
Original file line numberDiff line numberDiff line change
@@ -112,21 +112,20 @@ def __init__(
112112
self.model_path.encode("utf-8"), self.params
113113
)
114114

115-
self.lora_base = None
116-
self.lora_path = None
117-
if lora_path:
118-
self.lora_base = lora_base
119-
# Use lora_base if set otherwise revert to using model_path.
120-
lora_base = lora_base if lora_base is not None else model_path
121-
122-
self.lora_path = lora_path
115+
self.lora_base = lora_base
116+
self.lora_path = lora_path
117+
if self.lora_path:
123118
if llama_cpp.llama_apply_lora_from_file(
124119
self.ctx,
125-
lora_path.encode("utf-8"),
126-
lora_base.encode("utf-8"),
120+
llama_cpp.c_char_p(self.lora_path.encode("utf-8")),
121+
llama_cpp.c_char_p(self.lora_base.encode("utf-8"))
122+
if self.lora_base is not None
123+
else llama_cpp.c_char_p(0),
127124
llama_cpp.c_int(self.n_threads),
128125
):
129-
raise RuntimeError(f"Failed to apply LoRA from lora path: {lora_path} to base path: {lora_base}")
126+
raise RuntimeError(
127+
f"Failed to apply LoRA from lora path: {self.lora_path} to base path: {self.lora_base}"
128+
)
130129

131130
if self.verbose:
132131
print(llama_cpp.llama_print_system_info().decode("utf-8"), file=sys.stderr)

0 commit comments

Comments
 (0)