@@ -68,6 +68,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         printf("%s: ftype   = %d\n", __func__, hparams.ftype);
         printf("%s: qntvr   = %d\n", __func__, qntvr);

+        hparams.n_ctx = std::max(origmaxctx, hparams.n_ctx);
+
         hparams.ftype %= GGML_QNT_VERSION_FACTOR;
     }
@@ -474,8 +476,8 @@ bool gptj_eval(

         // self-attention
         {
-            struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
-            struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
+            struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, n_ctx);
+            struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, n_ctx);

            // store key and value to memory
            {
0 commit comments