@@ -7,6 +7,7 @@
 
 #define pr_fmt(fmt) "bpf_jit: " fmt
 
+#include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/printk.h>
@@ -56,6 +57,7 @@ struct jit_ctx {
	int idx;
	int epilogue_offset;
	int *offset;
+	int exentry_idx;
	__le32 *image;
	u32 stack_size;
 };
@@ -351,6 +353,67 @@ static void build_epilogue(struct jit_ctx *ctx)
	emit(A64_RET(A64_LR), ctx);
 }
 
+#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
+#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
+
+int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
+			      struct pt_regs *regs)
+{
+	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
+	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
+
+	regs->regs[dst_reg] = 0;
+	regs->pc = (unsigned long)&ex->fixup - offset;
+	return 1;
+}
+
+/* For accesses to BTF pointers, add an entry to the exception table */
+static int add_exception_handler(const struct bpf_insn *insn,
+				 struct jit_ctx *ctx,
+				 int dst_reg)
+{
+	off_t offset;
+	unsigned long pc;
+	struct exception_table_entry *ex;
+
+	if (!ctx->image)
+		/* First pass */
+		return 0;
+
+	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
+		return 0;
+
+	if (!ctx->prog->aux->extable ||
+	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
+		return -EINVAL;
+
+	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
+	pc = (unsigned long)&ctx->image[ctx->idx - 1];
+
+	offset = pc - (long)&ex->insn;
+	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+		return -ERANGE;
+	ex->insn = offset;
+
+	/*
+	 * Since the extable follows the program, the fixup offset is always
+	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
+	 * to keep things simple, and put the destination register in the upper
+	 * bits. We don't need to worry about buildtime or runtime sort
+	 * modifying the upper bits because the table is already sorted, and
+	 * isn't part of the main exception table.
+	 */
+	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
+	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+		return -ERANGE;
+
+	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
+
+	ctx->exentry_idx++;
+	return 0;
+}
+
 /* JITs an eBPF instruction.
  * Returns:
  * 0 - successfully JITed an 8-byte eBPF instruction.
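
The fixup encoding above packs a 27-bit byte offset into the low bits of the 32-bit fixup word and the destination register number into the top five bits. As a rough standalone illustration of that round trip, here is a sketch using plain shifts and masks in place of the kernel's GENMASK()/FIELD_PREP()/FIELD_GET() helpers; the offset and register values are made up:

#include <assert.h>
#include <stdint.h>

/* Mirrors BPF_FIXUP_OFFSET_MASK = GENMASK(26, 0) and
 * BPF_FIXUP_REG_MASK = GENMASK(31, 27) from the patch.
 */
#define FIXUP_OFFSET_MASK	0x07ffffffu	/* bits 26:0 */
#define FIXUP_REG_SHIFT		27		/* bits 31:27 */

static uint32_t pack_fixup(uint32_t offset, uint32_t dst_reg)
{
	assert((offset & ~FIXUP_OFFSET_MASK) == 0);	/* the FIELD_FIT() check */
	assert(dst_reg < 32);
	return offset | (dst_reg << FIXUP_REG_SHIFT);	/* the two FIELD_PREP()s */
}

int main(void)
{
	/* Hypothetical values: distance to the fixup field, dst_reg = r7 */
	uint32_t fixup = pack_fixup(0x1234, 7);

	assert((fixup & FIXUP_OFFSET_MASK) == 0x1234);	/* FIELD_GET(OFFSET_MASK) */
	assert((fixup >> FIXUP_REG_SHIFT) == 7);	/* FIELD_GET(REG_MASK) */
	return 0;
}
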
@@ -375,6 +438,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
	u8 jmp_cond, reg;
	s32 jmp_offset;
	u32 a64_insn;
+	int ret;
 
 #define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
@@ -694,7 +758,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;
-		int ret;
 
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
@@ -738,6 +801,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
@@ -753,6 +820,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
+
+		ret = add_exception_handler(insn, ctx, dst);
+		if (ret)
+			return ret;
		break;
 
	/* ST: *(size *)(dst + off) = imm */
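
The four new BPF_PROBE_MEM cases fall through to the same load emission as plain BPF_MEM; only the add_exception_handler() call afterwards differs. Since BPF_PROBE_MEM is just a different mode bit in the opcode, a probe load decodes to the same class and size as an ordinary load. A small standalone sketch of that decoding, with masks and values mirroring the BPF UAPI definitions:

#include <assert.h>
#include <stdint.h>

/* Opcode fields as defined in include/uapi/linux/bpf(_common).h */
#define BPF_CLASS(code)	((code) & 0x07)
#define BPF_SIZE(code)	((code) & 0x18)
#define BPF_MODE(code)	((code) & 0xe0)
#define BPF_LDX		0x01
#define BPF_DW		0x18
#define BPF_MEM		0x60
#define BPF_PROBE_MEM	0x20

int main(void)
{
	uint8_t mem = BPF_LDX | BPF_MEM | BPF_DW;
	uint8_t probe = BPF_LDX | BPF_PROBE_MEM | BPF_DW;

	/* Same class and size: the JIT emits the identical load... */
	assert(BPF_CLASS(mem) == BPF_CLASS(probe));
	assert(BPF_SIZE(mem) == BPF_SIZE(probe));
	/* ...but the mode tells add_exception_handler() to record an entry. */
	assert(BPF_MODE(probe) == BPF_PROBE_MEM);
	return 0;
}
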
@@ -868,6 +939,9 @@ static int validate_code(struct jit_ctx *ctx)
			return -1;
	}
 
+	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
+		return -1;
+
	return 0;
 }
 
@@ -884,14 +958,14 @@ struct arm64_jit_data {
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	int image_size, prog_size, extable_size;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
-	int image_size;
	u8 *image_ptr;
 
	if (!prog->jit_requested)
@@ -922,7 +996,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
-		image_size = sizeof(u32) * ctx.idx;
+		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
@@ -950,8 +1024,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);
 
+	extable_size = prog->aux->num_exentries *
+		sizeof(struct exception_table_entry);
+
	/* Now we know the actual image size. */
-	image_size = sizeof(u32) * ctx.idx;
+	prog_size = sizeof(u32) * ctx.idx;
+	image_size = prog_size + extable_size;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
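
With the exception table accounted for, the JIT image is laid out as the program text followed immediately by the extable, so prog_size doubles as the extable's offset into the image. A toy size calculation under assumed numbers (the instruction count, entry count, and 8-byte entry size are illustrative, not taken from the commit):

#include <stdio.h>

int main(void)
{
	unsigned int n_insns = 100;	/* ctx.idx after the sizing pass (assumed) */
	unsigned int n_exentries = 4;	/* one per BPF_PROBE_MEM load (assumed) */
	unsigned int exentry_size = 8;	/* two s32 fields, insn and fixup (assumed) */

	unsigned int prog_size = 4 * n_insns;	/* sizeof(u32) * ctx.idx */
	unsigned int extable_size = n_exentries * exentry_size;
	unsigned int image_size = prog_size + extable_size;

	/* prog->aux->extable = image_ptr + prog_size */
	printf("text: %u bytes, extable at offset %u, image: %u bytes\n",
	       prog_size, prog_size, image_size);
	return 0;
}
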
@@ -962,8 +1040,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	/* 2. Now, the actual pass. */
 
	ctx.image = (__le32 *)image_ptr;
+	if (extable_size)
+		prog->aux->extable = (void *)image_ptr + prog_size;
 skip_init_ctx:
	ctx.idx = 0;
+	ctx.exentry_idx = 0;
 
	build_prologue(&ctx, was_classic);
@@ -984,7 +1065,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
	/* And we're done. */
	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
+		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
 
	bpf_flush_icache(header, ctx.image + ctx.idx);
 
@@ -1005,7 +1086,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
-	prog->jited_len = image_size;
+	prog->jited_len = prog_size;
 
	if (!prog->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(prog, ctx.offset);