
Commit 96eabe7

iamkafai authored and davem330 committed
bpf: Allow selecting numa node during map creation
The current map creation API does not allow specifying a numa-node preference. The memory usually comes from the node where the map-creation process is running. Performance is not ideal if the bpf_prog is known to always run on a numa node different from the map-creation process. One use case is sharding on CPU to different LRU maps (i.e. an array of LRU maps).

Here is the result of map_perf_test on the INNER_LRU_HASH_PREALLOC test if we force the LRU map used by CPU0 to be allocated from a remote numa node:

[ The machine has 20 cores. CPU0-9 at node 0. CPU10-19 at node 1 ]

># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1628380 events per sec
4:inner_lru_hash_map_perf pre-alloc 1626396 events per sec
3:inner_lru_hash_map_perf pre-alloc 1626144 events per sec
6:inner_lru_hash_map_perf pre-alloc 1621657 events per sec
2:inner_lru_hash_map_perf pre-alloc 1621534 events per sec
1:inner_lru_hash_map_perf pre-alloc 1620292 events per sec
7:inner_lru_hash_map_perf pre-alloc 1613305 events per sec
0:inner_lru_hash_map_perf pre-alloc 1239150 events per sec  #<<<

After specifying the numa node:
># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1629627 events per sec
3:inner_lru_hash_map_perf pre-alloc 1628057 events per sec
1:inner_lru_hash_map_perf pre-alloc 1623054 events per sec
6:inner_lru_hash_map_perf pre-alloc 1616033 events per sec
2:inner_lru_hash_map_perf pre-alloc 1614630 events per sec
4:inner_lru_hash_map_perf pre-alloc 1612651 events per sec
7:inner_lru_hash_map_perf pre-alloc 1609337 events per sec
0:inner_lru_hash_map_perf pre-alloc 1619340 events per sec  #<<<

This patch adds one field, numa_node, to the bpf_attr. Since numa node 0 is a valid node, a new flag BPF_F_NUMA_NODE is also added. The numa_node field is honored if and only if the BPF_F_NUMA_NODE flag is set.

Numa-node selection is not supported for percpu maps.

This patch does not change every kmalloc; e.g. 'htab = kzalloc()' is left unchanged since the object is small enough to stay in the cache.

Signed-off-by: Martin KaFai Lau <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent bd76b87 commit 96eabe7
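For context, here is a minimal userspace sketch (not part of this commit) of how the new attribute is meant to be used: BPF_F_NUMA_NODE is set in map_flags and the new numa_node field names the node to allocate from. The helper name create_lru_map_on_node is made up for illustration, and error handling is omitted.

/* Sketch: create a BPF_MAP_TYPE_LRU_HASH map whose memory comes from 'node'.
 * Assumes a uapi linux/bpf.h that already carries the BPF_F_NUMA_NODE flag
 * and the numa_node field added by this commit.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_lru_map_on_node(int node)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_LRU_HASH;
        attr.key_size    = 4;          /* e.g. a u32 flow key */
        attr.value_size  = 8;
        attr.max_entries = 10000;
        attr.map_flags   = BPF_F_NUMA_NODE; /* numa_node is honored only with this flag */
        attr.numa_node   = node;

        /* Returns a map fd, or -1 with errno set (EINVAL for an invalid/offline node). */
        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

A CPU-sharded setup, such as the inner_lru_hash_map_perf test quoted above, would create one such map per node and install them as inner maps of an array-of-maps.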

9 files changed: +73 -23 lines changed


Diff for: include/linux/bpf.h

+9 -1

@@ -51,6 +51,7 @@ struct bpf_map {
 	u32 map_flags;
 	u32 pages;
 	u32 id;
+	int numa_node;
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
@@ -264,7 +265,7 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
-void *bpf_map_area_alloc(size_t size);
+void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
 
 extern int sysctl_unprivileged_bpf_disabled;
@@ -316,6 +317,13 @@ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 
+/* Return map's numa specified by userspace */
+static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
+{
+	return (attr->map_flags & BPF_F_NUMA_NODE) ?
+		attr->numa_node : NUMA_NO_NODE;
+}
+
 #else
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {

Diff for: include/uapi/linux/bpf.h

+9 -1

@@ -165,6 +165,7 @@ enum bpf_attach_type {
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
 #define BPF_EXIST	2 /* update existing element */
 
+/* flags for BPF_MAP_CREATE command */
 #define BPF_F_NO_PREALLOC	(1U << 0)
 /* Instead of having one common LRU list in the
  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
@@ -173,15 +174,22 @@ enum bpf_attach_type {
  * across different LRU lists.
  */
 #define BPF_F_NO_COMMON_LRU	(1U << 1)
+/* Specify numa node during map creation */
+#define BPF_F_NUMA_NODE		(1U << 2)
 
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
 		__u32	key_size;	/* size of key in bytes */
 		__u32	value_size;	/* size of value in bytes */
 		__u32	max_entries;	/* max number of entries in a map */
-		__u32	map_flags;	/* prealloc or not */
+		__u32	map_flags;	/* BPF_MAP_CREATE related
+					 * flags defined above.
+					 */
 		__u32	inner_map_fd;	/* fd pointing to the inner map */
+		__u32	numa_node;	/* numa node (effective only if
+					 * BPF_F_NUMA_NODE is set).
+					 */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */

Diff for: kernel/bpf/arraymap.c

+5 -2

@@ -49,13 +49,15 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_array *array;
 	u64 array_size;
 	u32 elem_size;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
-	    attr->value_size == 0 || attr->map_flags)
+	    attr->value_size == 0 || attr->map_flags & ~BPF_F_NUMA_NODE ||
+	    (percpu && numa_node != NUMA_NO_NODE))
 		return ERR_PTR(-EINVAL);
 
 	if (attr->value_size > KMALLOC_MAX_SIZE)
@@ -77,7 +79,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	/* allocate all map elements and zero-initialize them */
-	array = bpf_map_area_alloc(array_size);
+	array = bpf_map_area_alloc(array_size, numa_node);
 	if (!array)
 		return ERR_PTR(-ENOMEM);
 
@@ -87,6 +89,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array->map.value_size = attr->value_size;
 	array->map.max_entries = attr->max_entries;
 	array->map.map_flags = attr->map_flags;
+	array->map.numa_node = numa_node;
 	array->elem_size = elem_size;
 
 	if (!percpu)

Diff for: kernel/bpf/devmap.c

+6 -3

@@ -80,7 +80,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
-	    attr->value_size != 4 || attr->map_flags)
+	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
 		return ERR_PTR(-EINVAL);
 
 	dtab = kzalloc(sizeof(*dtab), GFP_USER);
@@ -93,6 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	dtab->map.value_size = attr->value_size;
 	dtab->map.max_entries = attr->max_entries;
 	dtab->map.map_flags = attr->map_flags;
+	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = -ENOMEM;
 
@@ -119,7 +120,8 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 		goto free_dtab;
 
 	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
-					      sizeof(struct bpf_dtab_netdev *));
+					      sizeof(struct bpf_dtab_netdev *),
+					      dtab->map.numa_node);
 	if (!dtab->netdev_map)
 		goto free_dtab;
 
@@ -344,7 +346,8 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
 	if (!ifindex) {
 		dev = NULL;
 	} else {
-		dev = kmalloc(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN);
+		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
+				   map->numa_node);
 		if (!dev)
 			return -ENOMEM;
 

Diff for: kernel/bpf/hashtab.c

+15 -4

@@ -18,6 +18,9 @@
 #include "bpf_lru_list.h"
 #include "map_in_map.h"
 
+#define HTAB_CREATE_FLAG_MASK \
+	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)
+
 struct bucket {
 	struct hlist_nulls_head head;
 	raw_spinlock_t lock;
@@ -138,7 +141,8 @@ static int prealloc_init(struct bpf_htab *htab)
 	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
 		num_entries += num_possible_cpus();
 
-	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
+	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
+					 htab->map.numa_node);
 	if (!htab->elems)
 		return -ENOMEM;
 
@@ -233,6 +237,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	 */
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_htab *htab;
 	int err, i;
 	u64 cost;
@@ -248,7 +253,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	 */
 		return ERR_PTR(-EPERM);
 
-	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
+	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
 		return ERR_PTR(-EINVAL);
 
@@ -258,6 +263,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (lru && !prealloc)
 		return ERR_PTR(-ENOTSUPP);
 
+	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
+		return ERR_PTR(-EINVAL);
+
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
@@ -268,6 +276,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	htab->map.value_size = attr->value_size;
 	htab->map.max_entries = attr->max_entries;
 	htab->map.map_flags = attr->map_flags;
+	htab->map.numa_node = numa_node;
 
 	/* check sanity of attributes.
 	 * value_size == 0 may be allowed in the future to use map as a set
@@ -346,7 +355,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 
 	err = -ENOMEM;
 	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
-					   sizeof(struct bucket));
+					   sizeof(struct bucket),
+					   htab->map.numa_node);
 	if (!htab->buckets)
 		goto free_htab;
 
@@ -689,7 +699,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			atomic_dec(&htab->count);
 			return ERR_PTR(-E2BIG);
 		}
-		l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
+				     htab->map.numa_node);
 		if (!l_new)
 			return ERR_PTR(-ENOMEM);
 	}

Diff for: kernel/bpf/lpm_trie.c

+7 -2

@@ -244,7 +244,8 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
 	if (value)
 		size += trie->map.value_size;
 
-	node = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
+	node = kmalloc_node(size, GFP_ATOMIC | __GFP_NOWARN,
+			    trie->map.numa_node);
 	if (!node)
 		return NULL;
 
@@ -405,6 +406,8 @@ static int trie_delete_elem(struct bpf_map *map, void *key)
 #define LPM_KEY_SIZE_MAX	LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
 #define LPM_KEY_SIZE_MIN	LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
 
+#define LPM_CREATE_FLAG_MASK	(BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE)
+
 static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
 	struct lpm_trie *trie;
@@ -416,7 +419,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 ||
-	    attr->map_flags != BPF_F_NO_PREALLOC ||
+	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
+	    attr->map_flags & ~LPM_CREATE_FLAG_MASK ||
 	    attr->key_size < LPM_KEY_SIZE_MIN ||
 	    attr->key_size > LPM_KEY_SIZE_MAX ||
 	    attr->value_size < LPM_VAL_SIZE_MIN ||
@@ -433,6 +437,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	trie->map.value_size = attr->value_size;
 	trie->map.max_entries = attr->max_entries;
 	trie->map.map_flags = attr->map_flags;
+	trie->map.numa_node = bpf_map_attr_numa_node(attr);
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;

Diff for: kernel/bpf/sockmap.c

+7 -3

@@ -443,7 +443,9 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 {
 	struct smap_psock *psock;
 
-	psock = kzalloc(sizeof(struct smap_psock), GFP_ATOMIC | __GFP_NOWARN);
+	psock = kzalloc_node(sizeof(struct smap_psock),
+			     GFP_ATOMIC | __GFP_NOWARN,
+			     stab->map.numa_node);
 	if (!psock)
 		return ERR_PTR(-ENOMEM);
 
@@ -465,7 +467,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
-	    attr->value_size != 4 || attr->map_flags)
+	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
 		return ERR_PTR(-EINVAL);
 
 	if (attr->value_size > KMALLOC_MAX_SIZE)
@@ -481,6 +483,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	stab->map.value_size = attr->value_size;
 	stab->map.max_entries = attr->max_entries;
 	stab->map.map_flags = attr->map_flags;
+	stab->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
@@ -495,7 +498,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 		goto free_stab;
 
 	stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
-					    sizeof(struct sock *));
+					    sizeof(struct sock *),
+					    stab->map.numa_node);
 	if (!stab->sock_map)
 		goto free_stab;
 

Diff for: kernel/bpf/stackmap.c

+5 -3

@@ -31,7 +31,8 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 	int err;
 
-	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
+	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
+					 smap->map.numa_node);
 	if (!smap->elems)
 		return -ENOMEM;
 
@@ -59,7 +60,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (!capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (attr->map_flags)
+	if (attr->map_flags & ~BPF_F_NUMA_NODE)
 		return ERR_PTR(-EINVAL);
 
 	/* check sanity of attributes */
@@ -75,7 +76,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
 
-	smap = bpf_map_area_alloc(cost);
+	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
 	if (!smap)
 		return ERR_PTR(-ENOMEM);
 
@@ -91,6 +92,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	smap->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = bpf_map_precharge_memlock(smap->map.pages);
 	if (err)

Diff for: kernel/bpf/syscall.c

+10 -4

@@ -105,7 +105,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-void *bpf_map_area_alloc(size_t size)
+void *bpf_map_area_alloc(size_t size, int numa_node)
 {
 	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
 	 * trigger under memory pressure as we really just want to
@@ -115,12 +115,13 @@ void *bpf_map_area_alloc(size_t size)
 	void *area;
 
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-		area = kmalloc(size, GFP_USER | flags);
+		area = kmalloc_node(size, GFP_USER | flags, numa_node);
 		if (area != NULL)
 			return area;
 	}
 
-	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
+	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
+					   __builtin_return_address(0));
 }
 
 void bpf_map_area_free(void *area)
@@ -309,17 +310,22 @@ int bpf_map_new_fd(struct bpf_map *map)
 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
+#define BPF_MAP_CREATE_LAST_FIELD numa_node
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
+	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_map *map;
 	int err;
 
 	err = CHECK_ATTR(BPF_MAP_CREATE);
 	if (err)
 		return -EINVAL;
 
+	if (numa_node != NUMA_NO_NODE &&
+	    (numa_node >= nr_node_ids || !node_online(numa_node)))
+		return -EINVAL;
+
 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 	map = find_and_alloc_map(attr);
 	if (IS_ERR(map))
