1 file changed: +16 −17 lines changed

@@ -887,23 +887,22 @@ enum e_model {
 static const size_t kB = 1024;
 static const size_t MB = kB*kB;

-// default hparams (LLaMA 7B)
 struct llama_hparams {
-    uint32_t n_vocab     = 32000;
-    uint32_t n_ctx_train = 2048; // the context size used during training
-    uint32_t n_ctx       = 512;  // the context size used during inference
-    uint32_t n_embd      = 4096;
-    uint32_t n_head      = 32;
-    uint32_t n_head_kv   = 32;
-    uint32_t n_layer     = 32;
-    uint32_t n_rot       = 64;
-    uint32_t n_ff        = 11008;
-
-    float f_norm_eps     = 1e-5;
-    float f_norm_rms_eps = 1e-5;
-
-    float rope_freq_base  = 10000.0f;
-    float rope_freq_scale = 1.0f;
+    uint32_t n_vocab;
+    uint32_t n_ctx_train; // context size the model was trained on
+    uint32_t n_ctx;       // context size used during inference
+    uint32_t n_embd;
+    uint32_t n_head;
+    uint32_t n_head_kv;
+    uint32_t n_layer;
+    uint32_t n_rot;
+    uint32_t n_ff;
+
+    float f_norm_eps;
+    float f_norm_rms_eps;
+
+    float rope_freq_base;
+    float rope_freq_scale;

     bool operator!=(const llama_hparams & other) const {
         return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT

@@ -1025,7 +1024,7 @@ struct llama_model {

     std::string name = "n/a";

-    llama_hparams hparams;
+    llama_hparams hparams = {};
     llama_vocab vocab;

     struct ggml_tensor * tok_embeddings;
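The change drops the hard-coded LLaMA 7B defaults from `llama_hparams` and instead value-initializes the struct at its declaration in `llama_model` via `= {}`. As a quick illustration (not part of the PR, using a hypothetical trimmed stand-in struct named `hparams_demo`), the sketch below shows why the `= {}` matters once the in-class initializers are gone: value-initialization zero-initializes every member of the aggregate, whereas a plain `llama_hparams hparams;` would leave the fields indeterminate and make the `memcmp`-based `operator!=` unreliable.

// Minimal C++ sketch: aggregate value-initialization vs. default-initialization.
#include <cstdint>
#include <cstdio>

struct hparams_demo {        // stand-in for llama_hparams, fields trimmed
    uint32_t n_vocab;
    uint32_t n_embd;
    float    rope_freq_base;
};

int main() {
    hparams_demo hp = {};    // value-initialization: every member becomes 0 / 0.0f
    std::printf("n_vocab=%u n_embd=%u rope_freq_base=%g\n",
                hp.n_vocab, hp.n_embd, hp.rope_freq_base);
    return 0;
}

With the defaults removed from the struct, the actual values are expected to come from the model file at load time, so the zeroed state only serves as a well-defined starting point.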