From 8afc1ef312f2c6a9b490879ffdc877c6e5d05e22 Mon Sep 17 00:00:00 2001
From: KerfuffleV2
Date: Sun, 20 Aug 2023 09:34:48 -0600
Subject: [PATCH 01/11] First pass at converting GGMLv3 LLaMA models to GGUF

---
 convert-llama-ggmlv3-to-gguf.py | 223 ++++++++++++++++++++++++++++++++
 gguf.py                         |  52 ++++++--
 2 files changed, 263 insertions(+), 12 deletions(-)
 create mode 100644 convert-llama-ggmlv3-to-gguf.py

diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py
new file mode 100644
index 0000000000000..51bf44f0743a4
--- /dev/null
+++ b/convert-llama-ggmlv3-to-gguf.py
@@ -0,0 +1,223 @@
+import sys, struct, math, argparse
+
+import numpy as np
+
+import gguf
+
+# Note: Does not support GGML_QKK_64
+QK_K = 256
+# Items here are (block size, type size)
+GGML_QUANT_SIZES = {
+    gguf.GGMLQuantizationType.F32  : (1, 4),
+    gguf.GGMLQuantizationType.F16  : (1, 2),
+    gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16),
+    gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16),
+    gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16),
+    gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16),
+    gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32),
+    gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32),
+    gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4),
+    gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12),
+    gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12),
+    gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
+    gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
+    gguf.GGMLQuantizationType.Q8_K : (256, 2 + QK_K + QK_K // 8),
+}
+
+class Hyperparameters:
+    def __init__(self):
+        self.n_vocab = self.n_embd = self.n_mult = self.n_head = self.n_layer = self.n_rot = self.ftype = 0
+
+    def load(self, data, offset):
+        (
+            self.n_vocab,
+            self.n_embd,
+            self.n_mult,
+            self.n_head,
+            self.n_layer,
+            self.n_rot,
+            self.ftype,
+        ) = struct.unpack('<7I', data[offset:offset + (4 * 7)])
+        return 4 * 7
+
+    def __str__(self):
+        return f'<Hyperparameters: n_vocab={self.n_vocab}, n_embd={self.n_embd}, n_mult={self.n_mult}, n_head={self.n_head}, n_layer={self.n_layer}, n_rot={self.n_rot}, ftype={self.ftype}>'
+
+class Vocab:
+    def __init__(self):
+        self.items = []
+
+    def load(self, data, offset, n_vocab):
+        orig_offset = offset
+        for _ in range(n_vocab):
+            itemlen = struct.unpack('<I', data[offset:offset + 4])[0]
+            assert itemlen < 4096, 'Absurd vocab item length'
+            offset += 4
+            vocab = bytes(data[offset:offset + itemlen])
+            offset += itemlen
+            vscore = struct.unpack('<f', data[offset:offset + 4])[0]
+            offset += 4
+            self.items.append((vocab, vscore))
+        return offset - orig_offset
+
+class Tensor:
+    def __init__(self):
+        self.name = None
+        self.dims = ()
+        self.dtype = None
+        self.start_offset = 0
+        self.len_bytes = 0
+
+    def load(self, data, offset):
+        orig_offset = offset
+        (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
+        assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
+        assert name_len < 4096, 'Absurd tensor name length'
+        quant = GGML_QUANT_SIZES.get(dtype)
+        assert quant is not None, 'Unknown tensor type'
+        (blksize, tysize) = quant
+        offset += 12
+        self.dtype = dtype
+        self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
+        offset += 4 * n_dims
+        self.name = bytes(data[offset:offset + name_len])
+        offset += name_len
+        pad = ((offset + 31) & ~31) - offset
+        offset += pad
+        n_elems = np.prod(self.dims)
+        n_bytes = (n_elems * tysize) // blksize
+        self.start_offset = offset
+        self.len_bytes = n_bytes
+        offset += n_bytes
+        # print(n_dims, name_len, dtype, self.dims, self.name, pad)
+        return offset - orig_offset
+
+class GGMLV3Model:
+    def __init__(self):
+        self.hyperparameters = None
+        self.vocab = None
+        self.tensor_map = {}
+        self.tensors = []
+
+    def validate_header(self, data, offset):
+        if bytes(data[offset:offset + 4]) != b'tjgg' or struct.unpack('<I', data[offset + 4:offset + 8])[0] != 3:
+            raise ValueError('Only GGJTv3 supported')
+        return 8
+
+    def load(self, data, offset):
+        offset += self.validate_header(data, offset)
+        hp = Hyperparameters()
+        offset += hp.load(data, offset)
+        vocab = Vocab()
+        offset += vocab.load(data, offset, hp.n_vocab)
+        tensors = []
+        tensor_map = {}
+        while offset < len(data):
+            tensor = Tensor()
+            offset += tensor.load(data, offset)
+            tensor_map[tensor.name] = len(tensors)
+            tensors.append(tensor)
+        self.hyperparameters = hp
+        self.vocab = vocab
+        self.tensors = tensors
+        self.tensor_map = tensor_map
+        return offset
+
+def save_gguf(ggml_model, data, cfg):
+    hp = ggml_model.hyperparameters
+    ff_tensor_idx = ggml_model.tensor_map.get(b'layers.0.feed_forward.w1.weight')
+    assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor'
+    ff_tensor = ggml_model.tensors[ff_tensor_idx]
+    if cfg.gqa == 1:
+        n_kv_head = hp.n_head
+    else:
+        gqa = float(cfg.gqa)
+        n_kv_head = None
+        for x in range(1, 256):
+            if float(hp.n_head) / float(x) == gqa:
+                n_kv_head = x
+        assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
+        print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
+    nm = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, hp.n_layer)
+    gguf_writer = gguf.GGUFWriter(cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
+    #gguf_writer.add_name('meep')
+    #gguf_writer.add_source_hf_repo('merp')
+    # gguf_writer.add_tensor_data_layout("Meta AI original pth")
+    gguf_writer.add_context_length(cfg.context_length)
+    gguf_writer.add_embedding_length(hp.n_embd)
+    gguf_writer.add_block_count(hp.n_layer)
+    gguf_writer.add_feed_forward_length(ff_tensor.dims[1])
+    print('FF dim', ff_tensor.dims[1])
+    gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head)
+    gguf_writer.add_head_count(hp.n_head)
+    gguf_writer.add_head_count_kv(n_kv_head)
+    gguf_writer.add_layer_norm_rms_eps(float(cfg.eps))
+    gguf_writer.add_tokenizer_model('llama')
+    tokens = []
+    scores = []
+    print(f'* Adding {hp.n_vocab} vocab item(s)')
+    toktypes = []
+    for (tokid, (vbytes, vscore)) in enumerate(ggml_model.vocab.items):
+        if len(vbytes) > 1 and vbytes[0] == 32:
+            vbytes = vbytes.replace(b' ', b'\xe2\x96\x81')
+            tt = 1
+        if len(vbytes) == 0:
+            tt = 3
+        elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1:
+            hv = hex(vbytes[0])[2:].upper()
+            vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8')
+            tt = 6
+        toktypes.append(tt)
+        tokens.append(vbytes)
+        scores.append(vscore)
+
gguf_writer.add_token_list(tokens) + gguf_writer.add_token_scores(scores) + gguf_writer.add_token_types(toktypes) + print('* Adding tensors') + for tensor in ggml_model.tensors: + name = str(tensor.name, 'UTF-8') + if name.endswith('.weight'): + name = name[:-7] + suffix = '.weight' + elif name.endswith('.bias'): + name = name[:-5] + suffix = '.bias' + mapped_name = nm.get(name) + assert mapped_name is not None, f'Bad name {name}' + mapped_name += suffix + tempdims = list(tensor.dims[:]) + if len(tempdims) > 1: + temp = tempdims[1] + tempdims[1] = tempdims[0] + tempdims[0] = temp + print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}') + gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype) + print("gguf: write header") + gguf_writer.write_header_to_file() + print("gguf: write metadata") + gguf_writer.write_kv_data_to_file() + print("gguf: write tensors") + gguf_writer.write_tensors_to_file() + + gguf_writer.close() + +def handle_args(): + parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF') + parser.add_argument('--input', '-i', help = 'Input GGMLv3 filename') + parser.add_argument('--output', '-o', help ='Output GGUF filename') + parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') + parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps (use 1e-5 for LLaMA2)') + parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length') + return parser.parse_args() + +def main(): + cfg = handle_args() + data = np.memmap(cfg.input, mode = 'r') + model = GGMLV3Model() + offset = model.load(data, 0) + print(model.hyperparameters) + # print(model.vocab.items) + # return + save_gguf(model, data, cfg) + +main() diff --git a/gguf.py b/gguf.py index e5eb85ded3f59..5c82414fa710f 100644 --- a/gguf.py +++ b/gguf.py @@ -5,7 +5,7 @@ import numpy as np from enum import IntEnum, auto -from typing import Any, IO, List +from typing import Any, IO, List, Optional # # constants @@ -325,8 +325,20 @@ def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict: class GGMLQuantizationType(IntEnum): - F32 = 0 - F16 = 1 + F32 = 0 + F16 = 1 + Q4_0 = 2 + Q4_1 = 3 + Q5_0 = 6 + Q5_1 = 7 + Q8_0 = 8 + Q8_1 = 9 + Q2_K = 10 + Q3_K = 11 + Q4_K = 12 + Q5_K = 13 + Q6_K = 14 + Q8_K = 15 class GGUFValueType(IntEnum): @@ -359,7 +371,7 @@ def get_type(val): class GGUFWriter: - def __init__(self, path: str, arch: str): + def __init__(self, path: str, arch: str, use_temp_file = True): self.fout = open(path, "wb") self.arch = arch self.offset_tensor = 0 @@ -369,6 +381,8 @@ def __init__(self, path: str, arch: str): self.ti_data = b"" self.ti_data_count = 0 self.add_architecture() + self.use_temp_file = use_temp_file + self.tensors = [] def write_header_to_file(self): self.fout.write(struct.pack(" int: return ((x + n - 1) // n) * n - def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int): - assert tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" + def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int, raw_dtype: Optional[GGMLQuantizationType] = None): + assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" encoded_name = name.encode("utf8") self.ti_data += 
struct.pack(" Date: Sun, 20 Aug 2023 10:26:43 -0600 Subject: [PATCH 02/11] Cleanups, better output during conversion --- convert-llama-ggmlv3-to-gguf.py | 183 ++++++++++++++++++-------------- 1 file changed, 103 insertions(+), 80 deletions(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 51bf44f0743a4..c522585afa38c 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -123,83 +123,103 @@ def load(self, data, offset): self.tensor_map = tensor_map return offset -def save_gguf(ggml_model, data, cfg): - hp = ggml_model.hyperparameters - ff_tensor_idx = ggml_model.tensor_map.get(b'layers.0.feed_forward.w1.weight') - assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' - ff_tensor = ggml_model.tensors[ff_tensor_idx] - if cfg.gqa == 1: - n_kv_head = hp.n_head - else: - gqa = float(cfg.gqa) - n_kv_head = None - for x in range(1, 256): - if float(hp.n_head) / float(x) == gqa: - n_kv_head = x - assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" - print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') - nm = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, hp.n_layer) - gguf_writer = gguf.GGUFWriter(cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False) - #gguf_writer.add_name('meep') - #gguf_writer.add_source_hf_repo('merp') - # gguf_writer.add_tensor_data_layout("Meta AI original pth") - gguf_writer.add_context_length(cfg.context_length) - gguf_writer.add_embedding_length(hp.n_embd) - gguf_writer.add_block_count(hp.n_layer) - gguf_writer.add_feed_forward_length(ff_tensor.dims[1]) - print('FF dim', ff_tensor.dims[1]) - gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head) - gguf_writer.add_head_count(hp.n_head) - gguf_writer.add_head_count_kv(n_kv_head) - gguf_writer.add_layer_norm_rms_eps(float(cfg.eps)) - gguf_writer.add_tokenizer_model('llama') - tokens = [] - scores = [] - print(f'* Adding {hp.n_vocab} vocab item(s)') - toktypes = [] - for (tokid, (vbytes, vscore)) in enumerate(ggml_model.vocab.items): - if len(vbytes) > 1 and vbytes[0] == 32: - vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') - tt = 1 - if len(vbytes) == 0: - tt = 3 - elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1: - hv = hex(vbytes[0])[2:].upper() - vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8') - tt = 6 - toktypes.append(tt) - tokens.append(vbytes) - scores.append(vscore) - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - gguf_writer.add_token_types(toktypes) - print('* Adding tensors') - for tensor in ggml_model.tensors: - name = str(tensor.name, 'UTF-8') - if name.endswith('.weight'): - name = name[:-7] - suffix = '.weight' - elif name.endswith('.bias'): - name = name[:-5] - suffix = '.bias' - mapped_name = nm.get(name) - assert mapped_name is not None, f'Bad name {name}' - mapped_name += suffix - tempdims = list(tensor.dims[:]) - if len(tempdims) > 1: - temp = tempdims[1] - tempdims[1] = tempdims[0] - tempdims[0] = temp - print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}') - gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype) - print("gguf: write header") - gguf_writer.write_header_to_file() - print("gguf: write metadata") - gguf_writer.write_kv_data_to_file() - print("gguf: write tensors") - gguf_writer.write_tensors_to_file() - - gguf_writer.close() +class GGMLToGGUF: + def __init__(self, ggml_model, data, cfg): + hp = 
ggml_model.hyperparameters + self.model = ggml_model + self.data = data + self.cfg = cfg + ff_tensor_idx = ggml_model.tensor_map.get(b'layers.0.feed_forward.w1.weight') + assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' + ff_tensor = ggml_model.tensors[ff_tensor_idx] + self.ff_length = ff_tensor.dims[1] + if cfg.gqa == 1: + n_kv_head = hp.n_head + else: + gqa = float(cfg.gqa) + n_kv_head = None + for x in range(1, 256): + if float(hp.n_head) / float(x) == gqa: + n_kv_head = x + assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" + print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') + self.n_kv_head = n_kv_head + self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer) + + def save(self): + print('* Preparing to save GGUF file') + gguf_writer = gguf.GGUFWriter(self.cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False) + self.add_params(gguf_writer) + self.add_vocab(gguf_writer) + self.add_tensors(gguf_writer) + print(" gguf: write header") + gguf_writer.write_header_to_file() + print(" gguf: write metadata") + gguf_writer.write_kv_data_to_file() + print(" gguf: write tensors") + gguf_writer.write_tensors_to_file() + gguf_writer.close() + + def add_params(self, gguf_writer): + hp = self.model.hyperparameters + cfg = self.cfg + print('* Adding model parameters and KV items') + gguf_writer.add_context_length(cfg.context_length) + gguf_writer.add_embedding_length(hp.n_embd) + gguf_writer.add_block_count(hp.n_layer) + gguf_writer.add_feed_forward_length(self.ff_length) + gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head) + gguf_writer.add_head_count(hp.n_head) + gguf_writer.add_head_count_kv(self.n_kv_head) + gguf_writer.add_layer_norm_rms_eps(float(cfg.eps)) + gguf_writer.add_tokenizer_model('llama') + + def add_vocab(self, gguf_writer): + hp = self.model.hyperparameters + tokens = [] + scores = [] + print(f'* Adding {hp.n_vocab} vocab item(s)') + toktypes = [] + for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): + tt = 1 + if len(vbytes) > 1 and vbytes[0] == 32: + vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') + elif len(vbytes) == 0: + tt = 3 + elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1: + hv = hex(vbytes[0])[2:].upper() + vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8') + tt = 6 + toktypes.append(tt) + tokens.append(vbytes) + scores.append(vscore) + gguf_writer.add_token_list(tokens) + gguf_writer.add_token_scores(scores) + gguf_writer.add_token_types(toktypes) + + def add_tensors(self, gguf_writer): + nm = self.name_map + data = self.data + print(f'* Adding {len(self.model.tensors)} tensor(s)') + for tensor in self.model.tensors: + name = str(tensor.name, 'UTF-8') + if name.endswith('.weight'): + name = name[:-7] + suffix = '.weight' + elif name.endswith('.bias'): + name = name[:-5] + suffix = '.bias' + mapped_name = nm.get(name) + assert mapped_name is not None, f'Bad name {name}' + mapped_name += suffix + tempdims = list(tensor.dims[:]) + if len(tempdims) > 1: + temp = tempdims[1] + tempdims[1] = tempdims[0] + tempdims[0] = temp + # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}') + gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype) + def handle_args(): parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF') @@ -212,12 +232,15 @@ def handle_args(): def main(): cfg = handle_args() + print(f'* 
Using config: {cfg}') + print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n') data = np.memmap(cfg.input, mode = 'r') model = GGMLV3Model() + print('* Scanning GGML input file') offset = model.load(data, 0) print(model.hyperparameters) - # print(model.vocab.items) - # return - save_gguf(model, data, cfg) + converter = GGMLToGGUF(model, data, cfg) + converter.save() + print(f'* Successful completion. Output saved to: {cfg.output}') main() From 08959c88c21d02808b73198a038a96b7b34f2f5e Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 10:36:57 -0600 Subject: [PATCH 03/11] Fix vocab space conversion logic --- convert-llama-ggmlv3-to-gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index c522585afa38c..58fd3a09a6411 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -182,7 +182,7 @@ def add_vocab(self, gguf_writer): toktypes = [] for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): tt = 1 - if len(vbytes) > 1 and vbytes[0] == 32: + if len(vbytes) > 0 and vbytes[0] == 32: vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') elif len(vbytes) == 0: tt = 3 From 8083e20d1948844cdf66abead57dd30a822afd04 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 11:23:13 -0600 Subject: [PATCH 04/11] More vocab conversion fixes --- convert-llama-ggmlv3-to-gguf.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 58fd3a09a6411..e8ecdf6dad286 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -181,15 +181,15 @@ def add_vocab(self, gguf_writer): print(f'* Adding {hp.n_vocab} vocab item(s)') toktypes = [] for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): - tt = 1 - if len(vbytes) > 0 and vbytes[0] == 32: - vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') - elif len(vbytes) == 0: - tt = 3 + tt = 1 # Normal + if len(vbytes) == 0: + tt = 3 # Control elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1: hv = hex(vbytes[0])[2:].upper() vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8') - tt = 6 + tt = 6 # Byte + else: + vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') toktypes.append(tt) tokens.append(vbytes) scores.append(vscore) From ff2513439062afd39f405c227286fcf630c30ec6 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 13:03:19 -0600 Subject: [PATCH 05/11] Add description to converted GGUF files --- convert-llama-ggmlv3-to-gguf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index e8ecdf6dad286..336767fb6e580 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -164,6 +164,7 @@ def add_params(self, gguf_writer): hp = self.model.hyperparameters cfg = self.cfg print('* Adding model parameters and KV items') + gguf_writer.add_description('converted from legacy GGJTv3 format') gguf_writer.add_context_length(cfg.context_length) gguf_writer.add_embedding_length(hp.n_embd) gguf_writer.add_block_count(hp.n_layer) From 80912f07414a19f79022e9795fec77524eeaf966 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 13:15:01 -0600 Subject: [PATCH 06/11] Improve help text, expand warning --- convert-llama-ggmlv3-to-gguf.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 336767fb6e580..137c222cb0a0d 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -227,19 +227,19 @@ def handle_args(): parser.add_argument('--input', '-i', help = 'Input GGMLv3 filename') parser.add_argument('--output', '-o', help ='Output GGUF filename') parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') - parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps (use 1e-5 for LLaMA2)') - parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length') + parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2') + parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096') return parser.parse_args() def main(): cfg = handle_args() print(f'* Using config: {cfg}') - print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n') + print('\n=== WARNING === Be aware that this conversion script is best-effort. Special tokens may not be converted correctly. Use a native GGUF model if possible. === WARNING ===\n') data = np.memmap(cfg.input, mode = 'r') model = GGMLV3Model() print('* Scanning GGML input file') offset = model.load(data, 0) - print(model.hyperparameters) + print(f'* GGML model hyperparameters: {model.hyperparameters}') converter = GGMLToGGUF(model, data, cfg) converter.save() print(f'* Successful completion. Output saved to: {cfg.output}') From f56db2164a09d30368dd6c130bc8fd263ff1d524 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 14:24:26 -0600 Subject: [PATCH 07/11] Allow specifying name and description for output GGUF --- convert-llama-ggmlv3-to-gguf.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 137c222cb0a0d..89c94adf57f92 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -1,4 +1,5 @@ import sys, struct, math, argparse +from pathlib import Path import numpy as np @@ -163,8 +164,16 @@ def save(self): def add_params(self, gguf_writer): hp = self.model.hyperparameters cfg = self.cfg + desc = cfg.desc if cfg.desc is not None else 'converted from legacy GGJTv3 format' + try: + # Filenames aren't necessarily valid UTF8. 
+ name = cfg.name if cfg.name is not None else cfg.input.name + except UnicodeDecodeError: + name = None print('* Adding model parameters and KV items') - gguf_writer.add_description('converted from legacy GGJTv3 format') + if name is not None: + gguf_writer.add_name(name) + gguf_writer.add_description(desc) gguf_writer.add_context_length(cfg.context_length) gguf_writer.add_embedding_length(hp.n_embd) gguf_writer.add_block_count(hp.n_layer) @@ -224,8 +233,10 @@ def add_tensors(self, gguf_writer): def handle_args(): parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF') - parser.add_argument('--input', '-i', help = 'Input GGMLv3 filename') - parser.add_argument('--output', '-o', help ='Output GGUF filename') + parser.add_argument('--input', '-i', type = Path, help = 'Input GGMLv3 filename') + parser.add_argument('--output', '-o', type = Path, help ='Output GGUF filename') + parser.add_argument('--name', help = 'Set model name') + parser.add_argument('--desc', help = 'Set model description') parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2') parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096') From e854cd7dc60992c59ec959c5ae27c8f8b4a5ed6d Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 15:58:02 -0600 Subject: [PATCH 08/11] Allow overriding vocab and hyperparams from original model metadata --- convert-llama-ggmlv3-to-gguf.py | 93 ++++++++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 14 deletions(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 89c94adf57f92..eca7be6f6ac7f 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -125,25 +125,30 @@ def load(self, data, offset): return offset class GGMLToGGUF: - def __init__(self, ggml_model, data, cfg): + def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None): hp = ggml_model.hyperparameters self.model = ggml_model self.data = data self.cfg = cfg + self.params_override = params_override + self.vocab_override = vocab_override ff_tensor_idx = ggml_model.tensor_map.get(b'layers.0.feed_forward.w1.weight') assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' ff_tensor = ggml_model.tensors[ff_tensor_idx] self.ff_length = ff_tensor.dims[1] - if cfg.gqa == 1: - n_kv_head = hp.n_head + if params_override is not None: + n_kv_head = params_override.n_head_kv else: - gqa = float(cfg.gqa) - n_kv_head = None - for x in range(1, 256): - if float(hp.n_head) / float(x) == gqa: - n_kv_head = x - assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" - print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') + if cfg.gqa == 1: + n_kv_head = hp.n_head + else: + gqa = float(cfg.gqa) + n_kv_head = None + for x in range(1, 256): + if float(hp.n_head) / float(x) == gqa: + n_kv_head = x + assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" + print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') self.n_kv_head = n_kv_head self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer) @@ -174,6 +179,20 @@ def add_params(self, gguf_writer): if name is not None: gguf_writer.add_name(name) 
gguf_writer.add_description(desc) + if self.params_override is not None: + po = self.params_override + assert po.n_embd == hp.n_embd, 'Model hyperparams mismatch' + assert po.n_layer == hp.n_layer, 'Model hyperparams mismatch' + assert po.n_head == hp.n_head, 'Model hyperparams mismatch' + gguf_writer.add_context_length (po.n_ctx) + gguf_writer.add_embedding_length (po.n_embd) + gguf_writer.add_block_count (po.n_layer) + gguf_writer.add_feed_forward_length (po.n_ff) + gguf_writer.add_rope_dimension_count(po.n_embd // po.n_head) + gguf_writer.add_head_count (po.n_head) + gguf_writer.add_head_count_kv (po.n_head_kv) + gguf_writer.add_layer_norm_rms_eps (po.f_norm_eps) + return gguf_writer.add_context_length(cfg.context_length) gguf_writer.add_embedding_length(hp.n_embd) gguf_writer.add_block_count(hp.n_layer) @@ -182,14 +201,32 @@ def add_params(self, gguf_writer): gguf_writer.add_head_count(hp.n_head) gguf_writer.add_head_count_kv(self.n_kv_head) gguf_writer.add_layer_norm_rms_eps(float(cfg.eps)) - gguf_writer.add_tokenizer_model('llama') def add_vocab(self, gguf_writer): hp = self.model.hyperparameters + gguf_writer.add_tokenizer_model('llama') tokens = [] scores = [] - print(f'* Adding {hp.n_vocab} vocab item(s)') toktypes = [] + if self.vocab_override is not None: + vo = self.vocab_override + print('* Adding vocab item(s)') + for (idx, vitem) in enumerate(vo.all_tokens()): + if len(vitem) == 3: + tokens.append(vitem[0]) + scores.append(vitem[1]) + toktypes.append(vitem[2]) + else: + # Maybe try to guess the token type here? + tokens.append(vitem[0]) + scores.append(vitem[1]) + assert len(tokens) == hp.n_vocab, f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}' + gguf_writer.add_token_list(tokens) + gguf_writer.add_token_scores(scores) + if len(toktypes) > 0: + gguf_writer.add_token_types(toktypes) + return + print(f'* Adding {hp.n_vocab} vocab item(s)') for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): tt = 1 # Normal if len(vbytes) == 0: @@ -230,6 +267,23 @@ def add_tensors(self, gguf_writer): # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}') gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype) +def handle_metadata(cfg): + import convert + assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory' + hf_config_path = cfg.model_metadata_dir / "config.json" + orig_config_path = cfg.model_metadata_dir / "params.json" + # Passing None to these load functions is not kosher but it should + # currently work for HF and only fail for original mode if + # n_vocab or n_ff is missing in params.json + if hf_config_path.exists(): + params = convert.Params.loadHFTransformerJson(None, hf_config_path) + elif orig_config_path.exists(): + params = convert.Params.loadOriginalParamsJson(None, orig_config_path) + else: + raise ValueError('Unable to load metadata') + vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype) + convert.check_vocab_size(params, vocab) + return (params, vocab) def handle_args(): parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF') @@ -240,18 +294,29 @@ def handle_args(): parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 
and OpenLLaMA, use 1e-5 for LLaMA2') parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096') + parser.add_argument('--model-metadata-dir', '-m', type = Path, help ='Load HuggingFace/.pth vocab and metadata from the specified directory') + parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir") + parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm)", default="spm") return parser.parse_args() def main(): cfg = handle_args() print(f'* Using config: {cfg}') - print('\n=== WARNING === Be aware that this conversion script is best-effort. Special tokens may not be converted correctly. Use a native GGUF model if possible. === WARNING ===\n') + print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n') + vocab_override = None + metadata_override = None + if cfg.model_metadata_dir is not None: + (params_override, vocab_override) = handle_metadata(cfg) + print(f'* Overriding params: {params_override}') + print(f'* Overriding vocab: {vocab_override}') + else: + print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n') data = np.memmap(cfg.input, mode = 'r') model = GGMLV3Model() print('* Scanning GGML input file') offset = model.load(data, 0) print(f'* GGML model hyperparameters: {model.hyperparameters}') - converter = GGMLToGGUF(model, data, cfg) + converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override) converter.save() print(f'* Successful completion. Output saved to: {cfg.output}') From 996aaca1d4eea0911250d8dd19ff526d676984f2 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Sun, 20 Aug 2023 16:06:23 -0600 Subject: [PATCH 09/11] Use correct params override var name --- convert-llama-ggmlv3-to-gguf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index eca7be6f6ac7f..8c5829c883350 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -304,9 +304,10 @@ def main(): print(f'* Using config: {cfg}') print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n') vocab_override = None - metadata_override = None + params_override = None if cfg.model_metadata_dir is not None: (params_override, vocab_override) = handle_metadata(cfg) + print('!! 
Note: When overriding params the --gqa, --eps and --context-length options are ignored.') print(f'* Overriding params: {params_override}') print(f'* Overriding vocab: {vocab_override}') else: From f68aef5473d3b3c274882d9567ce73f20fbc053c Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Mon, 21 Aug 2023 04:19:17 -0600 Subject: [PATCH 10/11] Fix wrong type size for Q8_K Better handling of original style metadata --- convert-llama-ggmlv3-to-gguf.py | 48 ++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py index 8c5829c883350..30038072f4bd5 100644 --- a/convert-llama-ggmlv3-to-gguf.py +++ b/convert-llama-ggmlv3-to-gguf.py @@ -22,12 +22,19 @@ gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12), gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12), gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16), - gguf.GGMLQuantizationType.Q8_K : (256, 2 + QK_K + QK_K // 8), + gguf.GGMLQuantizationType.Q8_K : (256, 4 + QK_K + QK_K // 8), } class Hyperparameters: def __init__(self): self.n_vocab = self.n_embd = self.n_mult = self.n_head = self.n_layer = self.n_rot = self.ftype = 0 + self.n_ff = 0 + + def set_n_ff(self, model): + ff_tensor_idx = model.tensor_map.get(b'layers.0.feed_forward.w1.weight') + assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' + ff_tensor = model.tensors[ff_tensor_idx] + self.n_ff = ff_tensor.dims[1] def load(self, data, offset): ( @@ -42,7 +49,7 @@ def load(self, data, offset): return 4 * 7 def __str__(self): - return f'' + return f'' class Vocab: def __init__(self): @@ -122,6 +129,7 @@ def load(self, data, offset): self.vocab = vocab self.tensors = tensors self.tensor_map = tensor_map + hp.set_n_ff(self) return offset class GGMLToGGUF: @@ -132,10 +140,6 @@ def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override self.cfg = cfg self.params_override = params_override self.vocab_override = vocab_override - ff_tensor_idx = ggml_model.tensor_map.get(b'layers.0.feed_forward.w1.weight') - assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' - ff_tensor = ggml_model.tensors[ff_tensor_idx] - self.ff_length = ff_tensor.dims[1] if params_override is not None: n_kv_head = params_override.n_head_kv else: @@ -196,7 +200,7 @@ def add_params(self, gguf_writer): gguf_writer.add_context_length(cfg.context_length) gguf_writer.add_embedding_length(hp.n_embd) gguf_writer.add_block_count(hp.n_layer) - gguf_writer.add_feed_forward_length(self.ff_length) + gguf_writer.add_feed_forward_length(hp.n_ff) gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head) gguf_writer.add_head_count(hp.n_head) gguf_writer.add_head_count_kv(self.n_kv_head) @@ -267,18 +271,24 @@ def add_tensors(self, gguf_writer): # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}') gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype) -def handle_metadata(cfg): +def handle_metadata(cfg, hp): import convert assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory' hf_config_path = cfg.model_metadata_dir / "config.json" orig_config_path = cfg.model_metadata_dir / "params.json" - # Passing None to these load functions is not kosher but it should - # currently work for HF and only fail for original mode if - # n_vocab or n_ff is missing in params.json + # We pass a fake model here. 
"original" mode will check the shapes of some + # tensors if information is missing in the .json file: other than that, the + # model data isn't used so this should be safe (at least for now). + fakemodel = { + 'tok_embeddings.weight': convert.LazyTensor.__new__(convert.LazyTensor), + 'layers.0.feed_forward.w1.weight': convert.LazyTensor.__new__(convert.LazyTensor), + } + fakemodel['tok_embeddings.weight'].shape = [hp.n_vocab] + fakemodel['layers.0.feed_forward.w1.weight'].shape = [hp.n_ff] if hf_config_path.exists(): - params = convert.Params.loadHFTransformerJson(None, hf_config_path) + params = convert.Params.loadHFTransformerJson(fakemodel, hf_config_path) elif orig_config_path.exists(): - params = convert.Params.loadOriginalParamsJson(None, orig_config_path) + params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path) else: raise ValueError('Unable to load metadata') vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype) @@ -303,20 +313,20 @@ def main(): cfg = handle_args() print(f'* Using config: {cfg}') print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n') + data = np.memmap(cfg.input, mode = 'r') + model = GGMLV3Model() + print('* Scanning GGML input file') + offset = model.load(data, 0) + print(f'* GGML model hyperparameters: {model.hyperparameters}') vocab_override = None params_override = None if cfg.model_metadata_dir is not None: - (params_override, vocab_override) = handle_metadata(cfg) + (params_override, vocab_override) = handle_metadata(cfg, model.hyperparameters) print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.') print(f'* Overriding params: {params_override}') print(f'* Overriding vocab: {vocab_override}') else: print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n') - data = np.memmap(cfg.input, mode = 'r') - model = GGMLV3Model() - print('* Scanning GGML input file') - offset = model.load(data, 0) - print(f'* GGML model hyperparameters: {model.hyperparameters}') converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override) converter.save() print(f'* Successful completion. Output saved to: {cfg.output}') From 054776049ebe6fcab96b2bc6091a7387c9c4a6f4 Mon Sep 17 00:00:00 2001 From: KerfuffleV2 Date: Mon, 21 Aug 2023 08:27:31 -0600 Subject: [PATCH 11/11] Set default value for gguf add_tensor raw_shape KW arg --- gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gguf.py b/gguf.py index 5c82414fa710f..9dbde2d543444 100644 --- a/gguf.py +++ b/gguf.py @@ -509,7 +509,7 @@ def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np. self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment) self.ti_data_count += 1 - def add_tensor(self, name: str, tensor: np.ndarray, raw_shape: Optional[np.ndarray], raw_dtype: Optional[GGMLQuantizationType] = None): + def add_tensor(self, name: str, tensor: np.ndarray, raw_shape: Optional[np.ndarray] = None, raw_dtype: Optional[GGMLQuantizationType] = None): if self.use_temp_file and not hasattr(self, "temp_file"): self.temp_file = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024) self.temp_file.seek(0)