We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent ad186f4 · commit 0822e15 — Copy full SHA for 0822e15
convert_hf_to_gguf.py
@@ -1856,10 +1856,7 @@ def prepare_tensors(self):
1856
class Llama4Model(LlamaModel):
1857
model_arch = gguf.MODEL_ARCH.LLAMA4
1858
undo_permute = False
1859
- ignore_vision = True
1860
1861
- # TODO @ngxson : avoid duplicate this code everywhere by at least support "text_config"
1862
- # same with llama, but we need to merge the text_config into the root level of hparams
1863
def __init__(self, *args, **kwargs):
1864
super().__init__(*args, **kwargs)
1865
# IMPORTANT: the normal "intermediate_size" is renamed to "intermediate_size_mlp", we need to undo this
0 commit comments