Commit 6d3be57

llama : remove unused llama_hparams defaults
1 parent 71ca2fa

File tree

1 file changed

+16 -17 lines changed

llama.cpp

+16 -17
@@ -887,23 +887,22 @@ enum e_model {
 static const size_t kB = 1024;
 static const size_t MB = kB*kB;
 
-// default hparams (LLaMA 7B)
 struct llama_hparams {
-    uint32_t n_vocab     = 32000;
-    uint32_t n_ctx_train = 2048; // the context size used during training
-    uint32_t n_ctx       = 512;  // the context size used during inference
-    uint32_t n_embd      = 4096;
-    uint32_t n_head      = 32;
-    uint32_t n_head_kv   = 32;
-    uint32_t n_layer     = 32;
-    uint32_t n_rot       = 64;
-    uint32_t n_ff        = 11008;
-
-    float f_norm_eps     = 1e-5;
-    float f_norm_rms_eps = 1e-5;
-
-    float rope_freq_base  = 10000.0f;
-    float rope_freq_scale = 1.0f;
+    uint32_t n_vocab;
+    uint32_t n_ctx_train; // context size the model was trained on
+    uint32_t n_ctx;       // context size used during inference
+    uint32_t n_embd;
+    uint32_t n_head;
+    uint32_t n_head_kv;
+    uint32_t n_layer;
+    uint32_t n_rot;
+    uint32_t n_ff;
+
+    float f_norm_eps;
+    float f_norm_rms_eps;
+
+    float rope_freq_base;
+    float rope_freq_scale;
 
     bool operator!=(const llama_hparams & other) const {
         return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
@@ -1025,7 +1024,7 @@ struct llama_model {
 
     std::string name = "n/a";
 
-    llama_hparams hparams;
+    llama_hparams hparams = {};
     llama_vocab vocab;
 
     struct ggml_tensor * tok_embeddings;
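
Note on the change (context, not part of the commit): with the in-class defaults removed, a plain llama_hparams has indeterminate members until they are loaded from the model file, which is why the declaration in llama_model becomes hparams = {}. A minimal standalone sketch of the distinction, using a hypothetical hparams_like struct in place of the real llama_hparams:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for llama_hparams after this commit: the members
// have no default initializers, so they are indeterminate unless the
// object is value-initialized.
struct hparams_like {
    uint32_t n_vocab;
    uint32_t n_embd;
    float    rope_freq_base;

    bool operator!=(const hparams_like & other) const {
        // Byte-wise comparison, mirroring llama.cpp's operator!= above.
        return static_cast<bool>(memcmp(this, &other, sizeof(hparams_like)));
    }
};

int main() {
    hparams_like a = {}; // value-initialized: all members zeroed
    hparams_like b = {}; // same

    printf("a != b: %s\n", (a != b) ? "true" : "false"); // prints: false

    // hparams_like c;        // default-initialized: members are indeterminate,
    // if (c != a) { ... }    // so reading them here would be undefined behavior
    return 0;
}

Value-initialization with = {} zeroes every member (and, on mainstream compilers, the padding bytes as well), so the memcmp-based operator!= compares well-defined bytes; comparing a default-initialized instance would read indeterminate values.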
