Skip to content

Commit 0734311

Browse files
zf1575192187
authored and
Alexei Starovoitov
committed
bpf: add bpf_map_lookup_percpu_elem for percpu map
Add a new eBPF helper, bpf_map_lookup_percpu_elem. The implementation is straightforward: following the map_lookup_elem implementation for percpu maps, it takes an additional cpu parameter and returns the element's value for the specified CPU. Signed-off-by: Feng Zhou <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 571b873 commit 0734311

File tree

9 files changed

+103
-2
lines changed

9 files changed

+103
-2
lines changed

include/linux/bpf.h

+2
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ struct bpf_map_ops {
8989
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
9090
int (*map_pop_elem)(struct bpf_map *map, void *value);
9191
int (*map_peek_elem)(struct bpf_map *map, void *value);
92+
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
9293

9394
/* funcs called by prog_array and perf_event_array map */
9495
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -2184,6 +2185,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
21842185
extern const struct bpf_func_proto bpf_map_push_elem_proto;
21852186
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
21862187
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
2188+
extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
21872189

21882190
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
21892191
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;

include/uapi/linux/bpf.h

+9
Original file line numberDiff line numberDiff line change
@@ -5164,6 +5164,14 @@ union bpf_attr {
51645164
* if not NULL, is a reference which must be released using its
51655165
* corresponding release function, or moved into a BPF map before
51665166
* program exit.
5167+
*
5168+
* void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
5169+
* Description
5170+
* Perform a lookup in *percpu map* for an entry associated to
5171+
* *key* on *cpu*.
5172+
* Return
5173+
* Map value associated to *key* on *cpu*, or **NULL** if no entry
5174+
* was found or *cpu* is invalid.
51675175
*/
51685176
#define __BPF_FUNC_MAPPER(FN) \
51695177
FN(unspec), \
@@ -5361,6 +5369,7 @@ union bpf_attr {
53615369
FN(skb_set_tstamp), \
53625370
FN(ima_file_hash), \
53635371
FN(kptr_xchg), \
5372+
FN(map_lookup_percpu_elem), \
53645373
/* */
53655374

53665375
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

kernel/bpf/arraymap.c

+15
Original file line numberDiff line numberDiff line change
@@ -243,6 +243,20 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
243243
return this_cpu_ptr(array->pptrs[index & array->index_mask]);
244244
}
245245

246+
/*
 * percpu_array_map_lookup_percpu_elem - fetch the value slot of a percpu
 * array element as seen by one particular CPU.
 *
 * Returns NULL when @cpu is not a possible CPU id or when the index read
 * from @key is outside the map's bounds; otherwise returns a pointer to
 * the @cpu-local copy of the element's value.
 */
static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *arr = container_of(map, struct bpf_array, map);
	u32 idx = *(u32 *)key;

	/* Reject CPU ids that cannot exist on this system. */
	if (cpu >= nr_cpu_ids)
		return NULL;

	/* Out-of-range index: no such element. */
	if (unlikely(idx >= arr->map.max_entries))
		return NULL;

	/* Masking with index_mask keeps the array access in bounds. */
	return per_cpu_ptr(arr->pptrs[idx & arr->index_mask], cpu);
}
259+
246260
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
247261
{
248262
struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -725,6 +739,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
725739
.map_lookup_elem = percpu_array_map_lookup_elem,
726740
.map_update_elem = array_map_update_elem,
727741
.map_delete_elem = array_map_delete_elem,
742+
.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
728743
.map_seq_show_elem = percpu_array_map_seq_show_elem,
729744
.map_check_btf = array_map_check_btf,
730745
.map_lookup_batch = generic_map_lookup_batch,

kernel/bpf/core.c

+1
Original file line numberDiff line numberDiff line change
@@ -2619,6 +2619,7 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
26192619
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
26202620
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
26212621
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2622+
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
26222623
const struct bpf_func_proto bpf_spin_lock_proto __weak;
26232624
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
26242625
const struct bpf_func_proto bpf_jiffies64_proto __weak;

kernel/bpf/hashtab.c

+32
Original file line numberDiff line numberDiff line change
@@ -2199,6 +2199,20 @@ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
21992199
return NULL;
22002200
}
22012201

2202+
/*
 * htab_percpu_map_lookup_percpu_elem - look up @key in a percpu hash map
 * and return a pointer to the value copy belonging to @cpu.
 *
 * Returns NULL when @cpu is not a possible CPU id or when @key is not
 * present in the map.
 */
static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *elem;

	if (cpu >= nr_cpu_ids)
		return NULL;

	elem = __htab_map_lookup_elem(map, key);
	if (!elem)
		return NULL;

	return per_cpu_ptr(htab_elem_get_ptr(elem, map->key_size), cpu);
}
2215+
22022216
static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
22032217
{
22042218
struct htab_elem *l = __htab_map_lookup_elem(map, key);
@@ -2211,6 +2225,22 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
22112225
return NULL;
22122226
}
22132227

2228+
/*
 * htab_lru_percpu_map_lookup_percpu_elem - LRU-map variant of the percpu
 * hash lookup: same contract as htab_percpu_map_lookup_percpu_elem, but a
 * hit additionally refreshes the element's LRU reference.
 *
 * Returns NULL when @cpu is not a possible CPU id or when @key is not
 * present in the map.
 */
static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *elem;

	if (cpu >= nr_cpu_ids)
		return NULL;

	elem = __htab_map_lookup_elem(map, key);
	if (!elem)
		return NULL;

	/* A successful lookup marks the element as recently used. */
	bpf_lru_node_set_ref(&elem->lru_node);
	return per_cpu_ptr(htab_elem_get_ptr(elem, map->key_size), cpu);
}
2243+
22142244
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
22152245
{
22162246
struct htab_elem *l;
@@ -2300,6 +2330,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
23002330
.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
23012331
.map_update_elem = htab_percpu_map_update_elem,
23022332
.map_delete_elem = htab_map_delete_elem,
2333+
.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
23032334
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
23042335
.map_set_for_each_callback_args = map_set_for_each_callback_args,
23052336
.map_for_each_callback = bpf_for_each_hash_elem,
@@ -2318,6 +2349,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
23182349
.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
23192350
.map_update_elem = htab_lru_percpu_map_update_elem,
23202351
.map_delete_elem = htab_lru_map_delete_elem,
2352+
.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
23212353
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
23222354
.map_set_for_each_callback_args = map_set_for_each_callback_args,
23232355
.map_for_each_callback = bpf_for_each_hash_elem,

kernel/bpf/helpers.c

+18
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,22 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
119119
.arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
120120
};
121121

122+
BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
123+
{
124+
WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
125+
return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
126+
}
127+
128+
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
129+
.func = bpf_map_lookup_percpu_elem,
130+
.gpl_only = false,
131+
.pkt_access = true,
132+
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
133+
.arg1_type = ARG_CONST_MAP_PTR,
134+
.arg2_type = ARG_PTR_TO_MAP_KEY,
135+
.arg3_type = ARG_ANYTHING,
136+
};
137+
122138
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
123139
.func = bpf_user_rnd_u32,
124140
.gpl_only = false,
@@ -1420,6 +1436,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
14201436
return &bpf_map_pop_elem_proto;
14211437
case BPF_FUNC_map_peek_elem:
14221438
return &bpf_map_peek_elem_proto;
1439+
case BPF_FUNC_map_lookup_percpu_elem:
1440+
return &bpf_map_lookup_percpu_elem_proto;
14231441
case BPF_FUNC_get_prandom_u32:
14241442
return &bpf_get_prandom_u32_proto;
14251443
case BPF_FUNC_get_smp_processor_id:

kernel/bpf/verifier.c

+15-2
Original file line numberDiff line numberDiff line change
@@ -6137,6 +6137,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
61376137
map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
61386138
goto error;
61396139
break;
6140+
case BPF_FUNC_map_lookup_percpu_elem:
6141+
if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
6142+
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6143+
map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
6144+
goto error;
6145+
break;
61406146
case BPF_FUNC_sk_storage_get:
61416147
case BPF_FUNC_sk_storage_delete:
61426148
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
@@ -6750,7 +6756,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
67506756
func_id != BPF_FUNC_map_pop_elem &&
67516757
func_id != BPF_FUNC_map_peek_elem &&
67526758
func_id != BPF_FUNC_for_each_map_elem &&
6753-
func_id != BPF_FUNC_redirect_map)
6759+
func_id != BPF_FUNC_redirect_map &&
6760+
func_id != BPF_FUNC_map_lookup_percpu_elem)
67546761
return 0;
67556762

67566763
if (map == NULL) {
@@ -13810,7 +13817,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1381013817
insn->imm == BPF_FUNC_map_pop_elem ||
1381113818
insn->imm == BPF_FUNC_map_peek_elem ||
1381213819
insn->imm == BPF_FUNC_redirect_map ||
13813-
insn->imm == BPF_FUNC_for_each_map_elem)) {
13820+
insn->imm == BPF_FUNC_for_each_map_elem ||
13821+
insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
1381413822
aux = &env->insn_aux_data[i + delta];
1381513823
if (bpf_map_ptr_poisoned(aux))
1381613824
goto patch_call_imm;
@@ -13859,6 +13867,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1385913867
bpf_callback_t callback_fn,
1386013868
void *callback_ctx,
1386113869
u64 flags))NULL));
13870+
BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
13871+
(void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
1386213872

1386313873
patch_map_ops_generic:
1386413874
switch (insn->imm) {
@@ -13886,6 +13896,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
1388613896
case BPF_FUNC_for_each_map_elem:
1388713897
insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
1388813898
continue;
13899+
case BPF_FUNC_map_lookup_percpu_elem:
13900+
insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
13901+
continue;
1388913902
}
1389013903

1389113904
goto patch_call_imm;

kernel/trace/bpf_trace.c

+2
Original file line numberDiff line numberDiff line change
@@ -1197,6 +1197,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
11971197
return &bpf_map_pop_elem_proto;
11981198
case BPF_FUNC_map_peek_elem:
11991199
return &bpf_map_peek_elem_proto;
1200+
case BPF_FUNC_map_lookup_percpu_elem:
1201+
return &bpf_map_lookup_percpu_elem_proto;
12001202
case BPF_FUNC_ktime_get_ns:
12011203
return &bpf_ktime_get_ns_proto;
12021204
case BPF_FUNC_ktime_get_boot_ns:

tools/include/uapi/linux/bpf.h

+9
Original file line numberDiff line numberDiff line change
@@ -5164,6 +5164,14 @@ union bpf_attr {
51645164
* if not NULL, is a reference which must be released using its
51655165
* corresponding release function, or moved into a BPF map before
51665166
* program exit.
5167+
*
5168+
* void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
5169+
* Description
5170+
* Perform a lookup in *percpu map* for an entry associated to
5171+
* *key* on *cpu*.
5172+
* Return
5173+
* Map value associated to *key* on *cpu*, or **NULL** if no entry
5174+
* was found or *cpu* is invalid.
51675175
*/
51685176
#define __BPF_FUNC_MAPPER(FN) \
51695177
FN(unspec), \
@@ -5361,6 +5369,7 @@ union bpf_attr {
53615369
FN(skb_set_tstamp), \
53625370
FN(ima_file_hash), \
53635371
FN(kptr_xchg), \
5372+
FN(map_lookup_percpu_elem), \
53645373
/* */
53655374

53665375
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

0 commit comments

Comments
 (0)