Commit 0be54f7

baby-llama : fix build after ggml_rope change (ggml-org#2016)
1 parent 181e8d9 commit 0be54f7

File tree

1 file changed: +6 -6 lines changed


examples/baby-llama/baby-llama.cpp

+6 -6
@@ -566,8 +566,8 @@ struct ggml_tensor * forward(
         // wk shape [n_embd, n_embd, 1, 1]
         // Qcur shape [n_embd/n_head, n_head, N, 1]
         // Kcur shape [n_embd/n_head, n_head, N, 1]
-        struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
-        struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
+        struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
+        struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
 
         // store key and value to memory
         {
@@ -823,8 +823,8 @@ struct ggml_tensor * forward_batch(
         // wk shape [n_embd, n_embd, 1, 1]
         // Qcur shape [n_embd/n_head, n_head, N, n_batch]
         // Kcur shape [n_embd/n_head, n_head, N, n_batch]
-        struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0);
-        struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0);
+        struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0, 0);
+        struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0, 0);
         assert_shape_4d(Qcur, n_embd/n_head, n_head, N, n_batch);
         assert_shape_4d(Kcur, n_embd/n_head, n_head, N, n_batch);
 
@@ -1116,7 +1116,7 @@ struct ggml_tensor * forward_lora(
                         model->layers[il].wqb,
                         cur)),
                     n_embd/n_head, n_head, N),
-                n_past, n_rot, 0);
+                n_past, n_rot, 0, 0);
         struct ggml_tensor * Kcur = ggml_rope(ctx0,
             ggml_reshape_3d(ctx0,
                 ggml_mul_mat(ctx0,
@@ -1125,7 +1125,7 @@ struct ggml_tensor * forward_lora(
                         model->layers[il].wkb,
                         cur)),
                     n_embd/n_head, n_head, N),
-                n_past, n_rot, 0);
+                n_past, n_rot, 0, 0);
 
         // store key and value to memory
         {
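Note on the change itself: as the diff shows, ggml_rope now takes one extra trailing integer argument, and every call site in baby-llama.cpp simply passes 0 for it. The sketch below only factors out the call shape visible in the diff; the rope_qk helper is hypothetical (not part of this commit), and the meaning of the new trailing parameter is not stated here, so the 0 is carried over verbatim.

// Hypothetical helper illustrating the updated ggml_rope call pattern from
// this commit. It assumes the ggml.h shipped in the same source tree; the
// final trailing 0 is the argument added by the upstream ggml_rope change.
#include "ggml.h"

static struct ggml_tensor * rope_qk(
        struct ggml_context * ctx0,
        struct ggml_tensor  * w,    // wq or wk, shape [n_embd, n_embd, 1, 1]
        struct ggml_tensor  * cur,  // input activations
        int n_embd, int n_head, int N,
        int n_past, int n_rot) {
    // Old call (no longer builds against the new header):
    //   ggml_rope(ctx0, x, n_past, n_rot, 0);
    // New call: one additional trailing argument, passed as 0 everywhere here.
    return ggml_rope(ctx0,
            ggml_reshape_3d(ctx0,
                ggml_mul_mat(ctx0, w, cur),
                n_embd/n_head, n_head, N),
            n_past, n_rot, 0, 0);
}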
