@@ -217,30 +217,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
217
217
return 0 ;
218
218
}
219
219
220
- static bool bpf_is_jmp_and_has_target (const struct bpf_insn * insn )
221
- {
222
- return BPF_CLASS (insn -> code ) == BPF_JMP &&
223
- /* Call and Exit are both special jumps with no
224
- * target inside the BPF instruction image.
225
- */
226
- BPF_OP (insn -> code ) != BPF_CALL &&
227
- BPF_OP (insn -> code ) != BPF_EXIT ;
228
- }
229
-
230
220
/* Walk every instruction of @prog and rewrite in-image jump targets so
 * that branches crossing the patch point at @pos still land on the same
 * instruction after @delta insns have been inserted there.
 *
 * A BPF_CALL with src_reg == BPF_PSEUDO_CALL carries its target in the
 * imm field (call into another BPF function); ordinary helper calls and
 * BPF_EXIT have no target inside the instruction image and are skipped.
 * Every other BPF_JMP instruction keeps its target in the off field.
 */
static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
{
	struct bpf_insn *insn = prog->insnsi;
	u32 insn_cnt = prog->len;
	bool target_in_imm;
	int target;
	u8 opcode;
	u32 i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		opcode = insn->code;
		if (BPF_CLASS(opcode) != BPF_JMP ||
		    BPF_OP(opcode) == BPF_EXIT)
			continue;
		if (BPF_OP(opcode) == BPF_CALL) {
			/* Only pseudo calls target the insn image. */
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			target_in_imm = true;
		} else {
			target_in_imm = false;
		}
		target = target_in_imm ? insn->imm : insn->off;

		/* Adjust the target only when the branch crosses the
		 * patched boundary, in either direction.
		 */
		if (i < pos && i + target + 1 > pos)
			target += delta;
		else if (i > pos + delta && i + target + 1 <= pos + delta)
			target -= delta;

		/* NOTE(review): off is s16 in struct bpf_insn while
		 * target is int; a sufficiently large delta would
		 * truncate here — presumed bounded by callers, confirm.
		 */
		if (target_in_imm)
			insn->imm = target;
		else
			insn->off = target;
	}
}
246
256
@@ -774,8 +784,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
774
784
*
775
785
* Decode and execute eBPF instructions.
776
786
*/
777
- static unsigned int ___bpf_prog_run (u64 * regs , const struct bpf_insn * insn ,
778
- u64 * stack )
787
+ static u64 ___bpf_prog_run (u64 * regs , const struct bpf_insn * insn , u64 * stack )
779
788
{
780
789
u64 tmp ;
781
790
static const void * jumptable [256 ] = {
@@ -835,6 +844,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
835
844
[BPF_ALU64 | BPF_NEG ] = & & ALU64_NEG ,
836
845
/* Call instruction */
837
846
[BPF_JMP | BPF_CALL ] = & & JMP_CALL ,
847
+ [BPF_JMP | BPF_CALL_ARGS ] = & & JMP_CALL_ARGS ,
838
848
[BPF_JMP | BPF_TAIL_CALL ] = & & JMP_TAIL_CALL ,
839
849
/* Jumps */
840
850
[BPF_JMP | BPF_JA ] = & & JMP_JA ,
@@ -1025,6 +1035,13 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
1025
1035
BPF_R4 , BPF_R5 );
1026
1036
CONT ;
1027
1037
1038
+ JMP_CALL_ARGS :
1039
+ BPF_R0 = (__bpf_call_base_args + insn -> imm )(BPF_R1 , BPF_R2 ,
1040
+ BPF_R3 , BPF_R4 ,
1041
+ BPF_R5 ,
1042
+ insn + insn -> off + 1 );
1043
+ CONT ;
1044
+
1028
1045
JMP_TAIL_CALL : {
1029
1046
struct bpf_map * map = (struct bpf_map * ) (unsigned long ) BPF_R2 ;
1030
1047
struct bpf_array * array = container_of (map , struct bpf_array , map );
@@ -1297,6 +1314,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
1297
1314
return ___bpf_prog_run(regs, insn, stack); \
1298
1315
}
1299
1316
1317
/* Interpreter entry points callable with the five BPF argument
 * registers passed directly as u64 values plus the instruction to start
 * from — the signature used to dispatch BPF-to-BPF pseudo calls.  One
 * instance is stamped out per supported stack size via the EVAL macros.
 */
#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
				      const struct bpf_insn *insn) \
{ \
	u64 stack[stack_size / sizeof(u64)]; \
	u64 regs[MAX_BPF_REG]; \
\
	/* Frame pointer: one past the end of the local stack. */ \
	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
	BPF_R1 = r1; \
	BPF_R2 = r2; \
	BPF_R3 = r3; \
	BPF_R4 = r4; \
	BPF_R5 = r5; \
	return ___bpf_prog_run(regs, insn, stack); \
}
1300
1334
#define EVAL1 (FN , X ) FN(X)
1301
1335
#define EVAL2 (FN , X , Y ...) FN(X) EVAL1(FN, Y)
1302
1336
#define EVAL3 (FN , X , Y ...) FN(X) EVAL2(FN, Y)
@@ -1308,6 +1342,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1308
1342
EVAL6 (DEFINE_BPF_PROG_RUN , 224 , 256 , 288 , 320 , 352 , 384 );
1309
1343
EVAL4 (DEFINE_BPF_PROG_RUN , 416 , 448 , 480 , 512 );
1310
1344
1345
+ EVAL6 (DEFINE_BPF_PROG_RUN_ARGS , 32 , 64 , 96 , 128 , 160 , 192 );
1346
+ EVAL6 (DEFINE_BPF_PROG_RUN_ARGS , 224 , 256 , 288 , 320 , 352 , 384 );
1347
+ EVAL4 (DEFINE_BPF_PROG_RUN_ARGS , 416 , 448 , 480 , 512 );
1348
+
1311
1349
#define PROG_NAME_LIST (stack_size ) PROG_NAME(stack_size),
1312
1350
1313
1351
static unsigned int (* interpreters [])(const void * ctx ,
@@ -1316,6 +1354,24 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1316
1354
EVAL6 (PROG_NAME_LIST , 224 , 256 , 288 , 320 , 352 , 384 )
1317
1355
EVAL4 (PROG_NAME_LIST , 416 , 448 , 480 , 512 )
1318
1356
};
1357
+ #undef PROG_NAME_LIST
1358
+ #define PROG_NAME_LIST (stack_size ) PROG_NAME_ARGS(stack_size),
1359
+ static u64 (* interpreters_args [])(u64 r1 , u64 r2 , u64 r3 , u64 r4 , u64 r5 ,
1360
+ const struct bpf_insn * insn ) = {
1361
+ EVAL6 (PROG_NAME_LIST , 32 , 64 , 96 , 128 , 160 , 192 )
1362
+ EVAL6 (PROG_NAME_LIST , 224 , 256 , 288 , 320 , 352 , 384 )
1363
+ EVAL4 (PROG_NAME_LIST , 416 , 448 , 480 , 512 )
1364
+ };
1365
+ #undef PROG_NAME_LIST
1366
+
1367
+ void bpf_patch_call_args (struct bpf_insn * insn , u32 stack_depth )
1368
+ {
1369
+ stack_depth = max_t (u32 , stack_depth , 1 );
1370
+ insn -> off = (s16 ) insn -> imm ;
1371
+ insn -> imm = interpreters_args [(round_up (stack_depth , 32 ) / 32 ) - 1 ] -
1372
+ __bpf_call_base_args ;
1373
+ insn -> code = BPF_JMP | BPF_CALL_ARGS ;
1374
+ }
1319
1375
1320
1376
bool bpf_prog_array_compatible (struct bpf_array * array ,
1321
1377
const struct bpf_prog * fp )
0 commit comments