
Commit 1ea47e0

4ast authored and borkmann committed
bpf: add support for bpf_call to interpreter
Though bpf_call is still the same call instruction, and the calling convention for 'bpf to bpf' and 'bpf to helper' is the same, the interpreter has to operate on 'struct bpf_insn *'. To distinguish these two cases, add a kernel-internal opcode and mark call insns with it. This opcode is seen by the interpreter only; JITs will never see it. Also add a tiny bit of debug code to aid interpreter debugging.

Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
1 parent b0b04fc commit 1ea47e0
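
Context for the diff below: before verification, a bpf-to-bpf call is an ordinary BPF_JMP | BPF_CALL instruction whose src_reg is BPF_PSEUDO_CALL and whose imm holds the callee's relative instruction offset. The verifier rewrites such instructions to the kernel-internal BPF_CALL_ARGS form via bpf_patch_call_args(), added below. A minimal userspace sketch of that rewrite (not part of the commit; field values are illustrative, and the struct mirrors the UAPI layout):

/* Userspace sketch, not kernel code: how a pseudo call insn is
 * re-marked for the interpreter. Field values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct bpf_insn {               /* mirrors include/uapi/linux/bpf.h */
        uint8_t  code;          /* opcode */
        uint8_t  dst_reg:4;     /* dest register */
        uint8_t  src_reg:4;     /* source register */
        int16_t  off;           /* signed offset */
        int32_t  imm;           /* signed immediate constant */
};

#define BPF_JMP         0x05
#define BPF_CALL        0x80
#define BPF_CALL_ARGS   0xe0    /* kernel-internal; JITs never see it */
#define BPF_PSEUDO_CALL 1

int main(void)
{
        /* as emitted by the loader: call a subprog 7 insns ahead */
        struct bpf_insn insn = {
                .code    = BPF_JMP | BPF_CALL,
                .src_reg = BPF_PSEUDO_CALL,
                .imm     = 7,
        };

        /* the gist of bpf_patch_call_args(): the target moves from imm
         * to off, imm becomes the interpreter-entry delta, and the
         * opcode is re-marked
         */
        insn.off  = (int16_t)insn.imm;
        insn.imm  = 0;  /* stands in for interpreters_args[i] - base */
        insn.code = BPF_JMP | BPF_CALL_ARGS;

        printf("code=0x%02x off=%d imm=%d\n", insn.code, insn.off, insn.imm);
        return 0;
}

After this rewrite only the interpreter's jump table has an entry for the opcode; the JIT path never encounters it.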

File tree: 4 files changed (+116, -17 lines)
include/linux/bpf.h, include/linux/filter.h, kernel/bpf/core.c, kernel/bpf/verifier.c


include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
@@ -402,6 +402,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);

include/linux/filter.h

Lines changed: 6 additions & 0 deletions
@@ -58,6 +58,9 @@ struct bpf_prog_aux;
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL   0xf0
 
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS   0xe0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -710,6 +713,9 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+        ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+         __bpf_call_base)
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
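
The __bpf_call_base_args macro is __bpf_call_base viewed through a six-argument function type, so the interpreter can hand the callee's first instruction over as a sixth parameter; the address does not change, and the interpreter only ever calls through it at offsets that land on the genuinely six-argument __bpf_prog_run_argsN helpers defined in core.c below. A hedged userspace sketch of the retyping, with stand-in names of our own:

/* Userspace sketch (our names, not kernel code): retyping a function
 * pointer changes the call signature, not the address.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t base5(uint64_t r1, uint64_t r2, uint64_t r3,
                      uint64_t r4, uint64_t r5)
{
        (void)r2; (void)r3; (void)r4; (void)r5;
        return r1;
}

typedef uint64_t (*fn6_t)(uint64_t, uint64_t, uint64_t, uint64_t,
                          uint64_t, const void *);

#define base6 ((fn6_t)base5)    /* six-argument view of the same symbol */

int main(void)
{
        /* identical addresses; only the type through which we would
         * call differs. Calling base6 itself would be undefined; the
         * kernel only calls through it at deltas that point to real
         * six-argument functions.
         */
        printf("%p %p\n", (void *)base5, (void *)base6);
        return 0;
}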

kernel/bpf/core.c

Lines changed: 73 additions & 17 deletions
@@ -217,30 +217,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
 	return 0;
 }
 
-static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
-{
-	return BPF_CLASS(insn->code) == BPF_JMP &&
-	       /* Call and Exit are both special jumps with no
-		* target inside the BPF instruction image.
-		*/
-	       BPF_OP(insn->code) != BPF_CALL &&
-	       BPF_OP(insn->code) != BPF_EXIT;
-}
-
 static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 i, insn_cnt = prog->len;
+	bool pseudo_call;
+	u8 code;
+	int off;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (!bpf_is_jmp_and_has_target(insn))
+		code = insn->code;
+		if (BPF_CLASS(code) != BPF_JMP)
 			continue;
+		if (BPF_OP(code) == BPF_EXIT)
+			continue;
+		if (BPF_OP(code) == BPF_CALL) {
+			if (insn->src_reg == BPF_PSEUDO_CALL)
+				pseudo_call = true;
+			else
+				continue;
+		} else {
+			pseudo_call = false;
+		}
+		off = pseudo_call ? insn->imm : insn->off;
 
 		/* Adjust offset of jmps if we cross boundaries. */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
+		if (i < pos && i + off + 1 > pos)
+			off += delta;
+		else if (i > pos + delta && i + off + 1 <= pos + delta)
+			off -= delta;
+
+		if (pseudo_call)
+			insn->imm = off;
+		else
+			insn->off = off;
 	}
 }
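
Why the loop grew: a pseudo call keeps its relative target in the 32-bit imm field, while other jumps keep theirs in the 16-bit off field, so when insns are inserted at pos the adjustment by delta has to be read from and written back to whichever field applies. A toy run of the boundary test with made-up numbers (a jump at index i targets i + off + 1):

/* Toy recomputation of the boundary test; numbers are illustrative. */
#include <stdio.h>

int main(void)
{
        unsigned int pos = 6, delta = 2;        /* 2 insns inserted at 6 */
        unsigned int i = 3;                     /* jump lives at insn 3 */
        int off = 5;                            /* old target: 3 + 5 + 1 = 9 */

        if (i < pos && i + off + 1 > pos)       /* jump spans the patch */
                off += delta;

        printf("new off=%d, target=%u\n", off, i + off + 1);   /* 7, 11 */
        return 0;
}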

@@ -774,8 +784,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
  *
  * Decode and execute eBPF instructions.
  */
-static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
-				    u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 	u64 tmp;
 	static const void *jumptable[256] = {
@@ -835,6 +844,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
 		/* Call instruction */
 		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
 		/* Jumps */
 		[BPF_JMP | BPF_JA] = &&JMP_JA,
@@ -1025,6 +1035,13 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 						       BPF_R4, BPF_R5);
 		CONT;
 
+	JMP_CALL_ARGS:
+		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
+							    BPF_R3, BPF_R4,
+							    BPF_R5,
+							    insn + insn->off + 1);
+		CONT;
+
 	JMP_TAIL_CALL: {
 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
 		struct bpf_array *array = container_of(map, struct bpf_array, map);
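
At JMP_CALL_ARGS the patched instruction's two halves are consumed: imm selects the frame-setup entry relative to __bpf_call_base_args, and insn + insn->off + 1 is the callee's first instruction, passed as the sixth argument; the callee's return value lands in R0. A userspace model with stand-in names (frame_64 plays the role of __bpf_prog_run_args64; the arithmetic on a function pointer mirrors the GCC extension the kernel relies on):

/* Userspace model of the dispatch; names and values are ours. */
#include <stdint.h>
#include <stdio.h>

struct insn { int16_t off; int32_t imm; };

typedef uint64_t (*entry_t)(uint64_t, uint64_t, uint64_t, uint64_t,
                            uint64_t, const struct insn *);

/* stands in for __bpf_prog_run_args64: set up a frame, re-enter the loop */
static uint64_t frame_64(uint64_t r1, uint64_t r2, uint64_t r3,
                         uint64_t r4, uint64_t r5, const struct insn *insn)
{
        (void)r2; (void)r3; (void)r4; (void)r5;
        printf("callee entry: insn %p, r1=%llu\n",
               (const void *)insn, (unsigned long long)r1);
        return r1 + 1;                  /* pretend return value in R0 */
}

int main(void)
{
        struct insn prog[8] = { [0] = { .off = 4, .imm = 0 } };
        const struct insn *ip = &prog[0];
        entry_t base = frame_64;        /* stands in for __bpf_call_base_args */

        /* the interpreter line, reshaped: R0 = (base + imm)(R1..R5, callee) */
        uint64_t r0 = (base + ip->imm)(1, 0, 0, 0, 0, ip + ip->off + 1);

        printf("r0=%llu\n", (unsigned long long)r0);    /* prints r0=2 */
        return 0;
}
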
@@ -1297,6 +1314,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
 	return ___bpf_prog_run(regs, insn, stack); \
 }
 
+#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
+#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
+static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+				      const struct bpf_insn *insn) \
+{ \
+	u64 stack[stack_size / sizeof(u64)]; \
+	u64 regs[MAX_BPF_REG]; \
+\
+	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+	BPF_R1 = r1; \
+	BPF_R2 = r2; \
+	BPF_R3 = r3; \
+	BPF_R4 = r4; \
+	BPF_R5 = r5; \
+	return ___bpf_prog_run(regs, insn, stack); \
+}
+
 #define EVAL1(FN, X) FN(X)
 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
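
DEFINE_BPF_PROG_RUN_ARGS stamps out one frame-setup function per permitted stack size, analogous to the existing DEFINE_BPF_PROG_RUN but using the bpf-to-bpf calling convention (five u64 arguments plus the callee's insn pointer). A compressed, compilable illustration of the EVAL expansion pattern, using shortened names of our own:

/* Compressed illustration of the EVAL expansion (our names). */
#include <stdio.h>

#define DEFINE_RUN(sz) \
static unsigned int run_##sz(void) { return sz; }

#define EVAL1(FN, X)       FN(X)
#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)

EVAL3(DEFINE_RUN, 32, 64, 96)   /* defines run_32, run_64, run_96 */

#define LIST(sz) run_##sz,
static unsigned int (*table[])(void) = { EVAL3(LIST, 32, 64, 96) };
#undef LIST

int main(void)
{
        printf("%u\n", table[1]());     /* run_64, prints 64 */
        return 0;
}
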
@@ -1308,6 +1342,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
 
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
+
 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
 
 static unsigned int (*interpreters[])(const void *ctx,
@@ -1316,6 +1354,24 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
+#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
+static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
+				  const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
+
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+{
+	stack_depth = max_t(u32, stack_depth, 1);
+	insn->off = (s16) insn->imm;
+	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
+		    __bpf_call_base_args;
+	insn->code = BPF_JMP | BPF_CALL_ARGS;
+}
 
 bool bpf_prog_array_compatible(struct bpf_array *array,
 			       const struct bpf_prog *fp)
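
The index arithmetic in bpf_patch_call_args() maps a verified stack depth onto one of the 16 interpreters_args[] entries (stack sizes 32..512 in steps of 32): the depth is clamped to at least 1 so a stackless callee still selects the 32-byte frame, then rounded up to the next multiple of 32. A standalone check of the computation:

/* Standalone check of the interpreters_args[] index computation. */
#include <stdio.h>

static unsigned int frame_index(unsigned int stack_depth)
{
        if (stack_depth < 1)                    /* max_t(u32, depth, 1) */
                stack_depth = 1;
        /* round_up(stack_depth, 32) / 32 - 1 */
        return (stack_depth + 31) / 32 - 1;
}

int main(void)
{
        /* 0 -> 0, 1 -> 0, 32 -> 0, 33 -> 1, 512 -> 15 (last of 16) */
        printf("%u %u %u %u %u\n", frame_index(0), frame_index(1),
               frame_index(32), frame_index(33), frame_index(512));
        return 0;
}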

kernel/bpf/verifier.c

Lines changed: 36 additions & 0 deletions
@@ -1458,6 +1458,21 @@ static int update_stack_depth(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static int get_callee_stack_depth(struct bpf_verifier_env *env,
+				  const struct bpf_insn *insn, int idx)
+{
+	int start = idx + insn->imm + 1, subprog;
+
+	subprog = find_subprog(env, start);
+	if (subprog < 0) {
+		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+			  start);
+		return -EFAULT;
+	}
+	subprog++;
+	return env->subprog_stack_depth[subprog];
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -4997,6 +5012,24 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static int fixup_call_args(struct bpf_verifier_env *env)
+{
+	struct bpf_prog *prog = env->prog;
+	struct bpf_insn *insn = prog->insnsi;
+	int i, depth;
+
+	for (i = 0; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		depth = get_callee_stack_depth(env, insn, i);
+		if (depth < 0)
+			return depth;
+		bpf_patch_call_args(insn, depth);
+	}
+	return 0;
+}
+
 /* fixup insn->imm field of bpf_call instructions
  * and inline eligible helpers as explicit sequence of BPF instructions
  *
@@ -5225,6 +5258,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (ret == 0)
 		ret = fixup_bpf_calls(env);
 
+	if (ret == 0)
+		ret = fixup_call_args(env);
+
 	if (log->level && bpf_verifier_log_full(log))
 		ret = -ENOSPC;
 	if (log->level && !log->ubuf) {
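
fixup_call_args() runs as the last rewriting pass in bpf_check(), after fixup_bpf_calls(): one sweep over the program, and every BPF_CALL marked BPF_PSEUDO_CALL gets the callee's verified stack depth (the callee starts at insn i + imm + 1) baked in via bpf_patch_call_args(). A toy version of the scan with a simplified insn type (the opcode encodings are the real ones; the program is invented):

/* Toy scan in the shape of fixup_call_args(); simplified insn type. */
#include <stdint.h>
#include <stdio.h>

struct insn { uint8_t code; uint8_t src_reg; int32_t imm; };

#define BPF_JMP_CALL    (0x05 | 0x80)   /* BPF_JMP | BPF_CALL */
#define BPF_PSEUDO_CALL 1

int main(void)
{
        struct insn prog[] = {
                { .code = BPF_JMP_CALL, .src_reg = BPF_PSEUDO_CALL, .imm = 2 },
                { .code = 0x95 },       /* exit */
                { .code = 0x95 },       /* filler */
                { .code = 0x95 },       /* callee entry, insn 3 */
        };
        unsigned int len = sizeof(prog) / sizeof(prog[0]);

        for (unsigned int i = 0; i < len; i++) {
                if (prog[i].code != BPF_JMP_CALL ||
                    prog[i].src_reg != BPF_PSEUDO_CALL)
                        continue;
                /* callee starts at i + imm + 1, the same computation
                 * get_callee_stack_depth() hands to find_subprog()
                 */
                printf("call at %u -> callee at %d\n",
                       i, (int)i + prog[i].imm + 1);
        }
        return 0;
}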
