Commit 87c518b

Update exporter and support scaling
1 parent dc65707 commit 87c518b

File tree

2 files changed: +92, -31 lines


convert-lora-to-ggml.py

+75, -25 lines
@@ -1,3 +1,4 @@
+import json
 import os
 import re
 import struct
@@ -14,15 +15,18 @@
 class UnquantizedDataType:
     name: str
 
-DT_F16 = UnquantizedDataType('F16')
-DT_F32 = UnquantizedDataType('F32')
+
+DT_F16 = UnquantizedDataType("F16")
+DT_F32 = UnquantizedDataType("F32")
+
 
 @dataclass(frozen=True)
 class QuantizedDataType:
     groupsize: int
     have_addends: bool
     have_g_idx: bool
 
+
 DataType = UnquantizedDataType
 
 DATA_TYPE_TO_FTYPE: dict[DataType, int] = {
@@ -35,17 +39,28 @@ class QuantizedDataType:
     DT_F32: np.dtype(np.float32),
 }
 
-NUMPY_TYPE_TO_DATA_TYPE: dict[np.dtype[Any], DataType] = {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()}
+NUMPY_TYPE_TO_DATA_TYPE: dict[np.dtype[Any], DataType] = {
+    dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()
+}
 
 HF_SUBLAYER_TO_GGML = {
     "self_attn.q_proj": "attention.wq.weight",
     "self_attn.k_proj": "attention.wk.weight",
     "self_attn.v_proj": "attention.wv.weight",
     "self_attn.o_proj": "attention.wo.weight",
+    # "embed_tokens.weight": "tok_embeddings.weight",
+    # "norm.weight": "norm.weight",
+    # "lm_head.weight": "output.weight",
+    # "mlp.gate_proj": "feed_forward.w1.weight",
+    # "mlp.down_proj": "feed_forward.w2.weight",
+    # "mlp.up_proj": "feed_forward.w3.weight",
+    # "input_layernorm": "attention_norm.weight",
+    # "post_attention_layernorm": "ffn_norm.weight",
 }
 
+
 def translate_tensor_name(t):
-    match = re.match(r'.*layers\.(\d+)\.(\w+\.\w+)\.lora_(A|B)\.weight', t)
+    match = re.match(r".*layers\.(\d+)\.(\w+\.\w+)\.lora_(A|B)\.weight", t)
     if match:
         nn = match.group(1)
         sub_layer = match.group(2)
@@ -54,50 +69,85 @@ def translate_tensor_name(t):
         sub_layer_renamed = HF_SUBLAYER_TO_GGML.get(sub_layer)
         if sub_layer_renamed is None:
             print(f"Error: unrecognized sub-layer {sub_layer} in tensor {t}")
-            exit(1)
+            sys.exit(1)
 
         output_string = f"layers.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.lora{lora_type}"
         return output_string
     else:
         print(f"Error: unrecognized tensor {t}")
-        exit(1)
+        sys.exit(1)
 
-def write_file_header(fout):
-    fout.write(b"ggla"[::-1]) # magic (ggml lora)
-    fout.write(struct.pack("i", 1)) # file version
+
+def write_file_header(fout, params):
+    fout.write(b"ggla"[::-1])  # magic (ggml lora)
+    fout.write(struct.pack("i", 1))  # file version
+    fout.write(struct.pack("ii", params["r"], params["lora_alpha"]))
 
 
 def write_tensor_header(self, name: str, shape: Sequence[int], data_type: 1) -> None:
-    sname = name.encode('utf-8')
-    fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[NUMPY_TYPE_TO_DATA_TYPE[data_type]]))
+    sname = name.encode("utf-8")
+    fout.write(
+        struct.pack(
+            "iii",
+            len(shape),
+            len(sname),
+            DATA_TYPE_TO_FTYPE[NUMPY_TYPE_TO_DATA_TYPE[data_type]],
+        )
+    )
     fout.write(struct.pack("i" * len(shape), *shape[::-1]))
     fout.write(sname)
     fout.seek((fout.tell() + 31) & -32)
-
 
-if len(sys.argv) < 2:
-    print(f"Usage: python {sys.argv[0]} adapter_model.bin [ggml_adapter_model.bin]")
+
+if len(sys.argv) != 2:
+    print(f"Usage: python {sys.argv[0]} <path>")
+    print(
+        "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
+    )
     sys.exit(1)
 
-input_path = sys.argv[1]
-if len(sys.argv) > 2:
-    output_path = sys.argv[2]
-else:
-    output_filename = f"ggml_{os.path.basename(input_path)}"
-    output_path = os.path.join(os.path.dirname(input_path), output_filename)
+input_json = os.path.join(sys.argv[1], "adapter_config.json")
+input_model = os.path.join(sys.argv[1], "adapter_model.bin")
+output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
 
-model = torch.load(input_path, map_location="cpu")
+model = torch.load(input_model, map_location="cpu")
+
+with open(input_json, "r") as f:
+    params = json.load(f)
+
+if params["peft_type"] != "LORA":
+    print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
+    sys.exit(1)
+
+if params["fan_in_fan_out"] == True:
+    print("Error: param fan_in_fan_out is not supported")
+    sys.exit(1)
+
+if params["bias"] is not None and params["bias"] != "none":
+    print("Error: param bias is not supported")
+    sys.exit(1)
+
+# TODO: these seem to be layers that have been trained but without lora.
+# doesn't seem widely used but eventually should be supported
+if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
+    print("Error: param modules_to_save is not supported")
+    sys.exit(1)
 
 with open(output_path, "wb") as fout:
-    write_file_header(fout)
+    fout.truncate()
+
+    write_file_header(fout, params)
     for k, v in model.items():
         # since ggml doesn't always support other types for the second operand,
         # the tensors are always converted and exported as f32
-        t = v.float().numpy()
+        v = v.float()
+        t = v.numpy()
         if "lora_A" in k:
             t = t.T
-        print(f"{k} => {translate_tensor_name(k)} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
+        print(
+            f"{k} => {translate_tensor_name(k)} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB"
+        )
         write_tensor_header(fout, translate_tensor_name(k), t.shape, t.dtype)
        t.tofile(fout)
 
-print(f"Converted {input_path} to {output_path}")
+print(f"Converted {input_json} and {input_model} to {output_path}")

llama.cpp

+17, -6 lines
@@ -1789,6 +1789,15 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
         }
     }
 
+    int32_t lora_r;
+    int32_t lora_alpha;
+    fin.read((char *) &lora_r, sizeof(lora_r));
+    fin.read((char *) &lora_alpha, sizeof(lora_alpha));
+    float scaling = (float)lora_alpha / (float)lora_r;
+
+    fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
+
+
     // create a temporary ggml context to store the lora tensors
     std::vector<uint8_t> buf(1024 * 1024 * 100);
     struct ggml_init_params params;
@@ -1890,19 +1899,21 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             // w = w + BA*s
             ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraB, loraA);
 
-            //if (true) {
-            //    ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, 1.0f);
-            //    BA = ggml_scale(lora_ctx, BA, scale_tensor);
-            //}
-            ggml_tensor * r = ggml_add(lora_ctx, tensor, BA);
+            if (scaling != 1.0f) {
+                ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
+                BA = ggml_scale(lora_ctx, BA, scale_tensor);
+            }
+
+            ggml_tensor * r = ggml_add_inplace(lora_ctx, tensor, BA);
+            //ggml_tensor * r = ggml_add(lora_ctx, tensor, BA);
             //r = ggml_cpy(lora_ctx, r, tensor);
 
             struct ggml_cgraph gf = ggml_build_forward(r);
             gf.n_threads = n_threads;
             ggml_graph_compute(lora_ctx, &gf);
 
             // hack until ggml_cpy supports quantized tensors
-            memcpy(tensor->data, r->data, ggml_nbytes(tensor));
+            // memcpy(tensor->data, r->data, ggml_nbytes(tensor));
 
             // we won't need these tensors again, reset the context to save memory
             ggml_free(lora_ctx);
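
The header fields read above feed directly into the LoRA update: scaling = lora_alpha / lora_r, and each target weight becomes w = w + scaling * (B · A), per the "w = w + BA*s" comment and the ggml_scale / ggml_add_inplace calls. A rough NumPy equivalent of what the ggml graph computes for one tensor (a hypothetical helper, ignoring quantization and the in-place mechanics) might look like:

import numpy as np

def apply_lora(w: np.ndarray, lora_a: np.ndarray, lora_b: np.ndarray,
               lora_r: int, lora_alpha: int) -> np.ndarray:
    # scaling = alpha / r, mirroring llama_apply_lora_from_file above
    scaling = lora_alpha / lora_r
    ba = lora_b @ lora_a      # (out, r) @ (r, in) -> same shape as w
    return w + scaling * ba   # llama.cpp performs this add in place on the base weight

When the adapter was trained with lora_alpha equal to r, scaling works out to 1.0 and the ggml_scale step is skipped entirely, as the `if (scaling != 1.0f)` guard shows.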
