Skip to content

Commit c4bcfb3

Browse files
yonghong-song
authored and
Alexei Starovoitov
committed
bpf: Implement cgroup storage available to non-cgroup-attached bpf progs
Similar to sk/inode/task storage, implement similar cgroup local storage. There already exists a local storage implementation for cgroup-attached bpf programs. See map type BPF_MAP_TYPE_CGROUP_STORAGE and helper bpf_get_local_storage(). But there are use cases such that non-cgroup attached bpf progs wants to access cgroup local storage data. For example, tc egress prog has access to sk and cgroup. It is possible to use sk local storage to emulate cgroup local storage by storing data in socket. But this is a waste as it could be lots of sockets belonging to a particular cgroup. Alternatively, a separate map can be created with cgroup id as the key. But this will introduce additional overhead to manipulate the new map. A cgroup local storage, similar to existing sk/inode/task storage, should help for this use case. The life-cycle of storage is managed with the life-cycle of the cgroup struct. i.e. the storage is destroyed along with the owning cgroup with a call to bpf_cgrp_storage_free() when cgroup itself is deleted. The userspace map operations can be done by using a cgroup fd as a key passed to the lookup, update and delete operations. Typically, the following code is used to get the current cgroup: struct task_struct *task = bpf_get_current_task_btf(); ... task->cgroups->dfl_cgrp ... and in structure task_struct definition: struct task_struct { .... struct css_set __rcu *cgroups; .... } With sleepable program, accessing task->cgroups is not protected by rcu_read_lock. So the current implementation only supports non-sleepable program and supporting sleepable program will be the next step together with adding rcu_read_lock protection for rcu tagged structures. Since map name BPF_MAP_TYPE_CGROUP_STORAGE has been used for old cgroup local storage support, the new map name BPF_MAP_TYPE_CGRP_STORAGE is used for cgroup storage available to non-cgroup-attached bpf programs. The old cgroup storage supports bpf_get_local_storage() helper to get the cgroup data. 
The new cgroup storage helper bpf_cgrp_storage_get() can provide similar functionality. While the old cgroup storage pre-allocates storage memory, the new mechanism can also pre-allocate with a user space bpf_map_update_elem() call to avoid potential run-time memory allocation failure. Therefore, the new cgroup storage can provide all functionality w.r.t. the old one. So in uapi bpf.h, the old BPF_MAP_TYPE_CGROUP_STORAGE is aliased to BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED to indicate the old cgroup storage can be deprecated since the new one can provide the same functionality. Acked-by: David Vernet <[email protected]> Signed-off-by: Yonghong Song <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent c83597f commit c4bcfb3

File tree

13 files changed

+385
-5
lines changed

13 files changed

+385
-5
lines changed

include/linux/bpf.h

+7
Original file line numberDiff line numberDiff line change
@@ -2041,6 +2041,7 @@ struct bpf_link *bpf_link_by_id(u32 id);
20412041

20422042
const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
20432043
void bpf_task_storage_free(struct task_struct *task);
2044+
void bpf_cgrp_storage_free(struct cgroup *cgroup);
20442045
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
20452046
const struct btf_func_model *
20462047
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
@@ -2295,6 +2296,10 @@ static inline bool has_current_bpf_ctx(void)
22952296
static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
22962297
{
22972298
}
2299+
2300+
static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
2301+
{
2302+
}
22982303
#endif /* CONFIG_BPF_SYSCALL */
22992304

23002305
void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
@@ -2535,6 +2540,8 @@ extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
25352540
extern const struct bpf_func_proto bpf_set_retval_proto;
25362541
extern const struct bpf_func_proto bpf_get_retval_proto;
25372542
extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
2543+
extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
2544+
extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
25382545

25392546
const struct bpf_func_proto *tracing_prog_func_proto(
25402547
enum bpf_func_id func_id, const struct bpf_prog *prog);

include/linux/bpf_types.h

+1
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
8686
BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
8787
#ifdef CONFIG_CGROUPS
8888
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
89+
BPF_MAP_TYPE(BPF_MAP_TYPE_CGRP_STORAGE, cgrp_storage_map_ops)
8990
#endif
9091
#ifdef CONFIG_CGROUP_BPF
9192
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)

include/linux/cgroup-defs.h

+4
Original file line numberDiff line numberDiff line change
@@ -504,6 +504,10 @@ struct cgroup {
504504
/* Used to store internal freezer state */
505505
struct cgroup_freezer_state freezer;
506506

507+
#ifdef CONFIG_BPF_SYSCALL
508+
struct bpf_local_storage __rcu *bpf_cgrp_storage;
509+
#endif
510+
507511
/* All ancestors including self */
508512
struct cgroup *ancestors[];
509513
};

include/uapi/linux/bpf.h

+49-1
Original file line numberDiff line numberDiff line change
@@ -922,7 +922,14 @@ enum bpf_map_type {
922922
BPF_MAP_TYPE_CPUMAP,
923923
BPF_MAP_TYPE_XSKMAP,
924924
BPF_MAP_TYPE_SOCKHASH,
925-
BPF_MAP_TYPE_CGROUP_STORAGE,
925+
BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
926+
/* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
927+
* to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
928+
* both cgroup-attached and other progs and supports all functionality
929+
* provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
930+
* BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
931+
*/
932+
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
926933
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
927934
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
928935
BPF_MAP_TYPE_QUEUE,
@@ -935,6 +942,7 @@ enum bpf_map_type {
935942
BPF_MAP_TYPE_TASK_STORAGE,
936943
BPF_MAP_TYPE_BLOOM_FILTER,
937944
BPF_MAP_TYPE_USER_RINGBUF,
945+
BPF_MAP_TYPE_CGRP_STORAGE,
938946
};
939947

940948
/* Note that tracing related programs such as
@@ -5435,6 +5443,44 @@ union bpf_attr {
54355443
* **-E2BIG** if user-space has tried to publish a sample which is
54365444
* larger than the size of the ring buffer, or which cannot fit
54375445
* within a struct bpf_dynptr.
5446+
*
5447+
* void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
5448+
* Description
5449+
* Get a bpf_local_storage from the *cgroup*.
5450+
*
5451+
* Logically, it could be thought of as getting the value from
5452+
* a *map* with *cgroup* as the **key**. From this
5453+
* perspective, the usage is not much different from
5454+
* **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except this
5455+
* helper enforces the key must be a cgroup struct and the map must also
5456+
* be a **BPF_MAP_TYPE_CGRP_STORAGE**.
5457+
*
5458+
* In reality, the local-storage value is embedded directly inside of the
5459+
* *cgroup* object itself, rather than being located in the
5460+
* **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
5461+
* queried for some *map* on a *cgroup* object, the kernel will perform an
5462+
* O(n) iteration over all of the live local-storage values for that
5463+
* *cgroup* object until the local-storage value for the *map* is found.
5464+
*
5465+
* An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
5466+
* used such that a new bpf_local_storage will be
5467+
* created if one does not exist. *value* can be used
5468+
* together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
5469+
* the initial value of a bpf_local_storage. If *value* is
5470+
* **NULL**, the new bpf_local_storage will be zero initialized.
5471+
* Return
5472+
* A bpf_local_storage pointer is returned on success.
5473+
*
5474+
* **NULL** if not found or there was an error in adding
5475+
* a new bpf_local_storage.
5476+
*
5477+
* long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
5478+
* Description
5479+
* Delete a bpf_local_storage from a *cgroup*.
5480+
* Return
5481+
* 0 on success.
5482+
*
5483+
* **-ENOENT** if the bpf_local_storage cannot be found.
54385484
*/
54395485
#define ___BPF_FUNC_MAPPER(FN, ctx...) \
54405486
FN(unspec, 0, ##ctx) \
@@ -5647,6 +5693,8 @@ union bpf_attr {
56475693
FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \
56485694
FN(ktime_get_tai_ns, 208, ##ctx) \
56495695
FN(user_ringbuf_drain, 209, ##ctx) \
5696+
FN(cgrp_storage_get, 210, ##ctx) \
5697+
FN(cgrp_storage_delete, 211, ##ctx) \
56505698
/* */
56515699

56525700
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't

kernel/bpf/Makefile

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
2525
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
2626
endif
2727
ifeq ($(CONFIG_CGROUPS),y)
28-
obj-$(CONFIG_BPF_SYSCALL) += cgroup_iter.o
28+
obj-$(CONFIG_BPF_SYSCALL) += cgroup_iter.o bpf_cgrp_storage.o
2929
endif
3030
obj-$(CONFIG_CGROUP_BPF) += cgroup.o
3131
ifeq ($(CONFIG_INET),y)

kernel/bpf/bpf_cgrp_storage.c

+247
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,247 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
4+
*/
5+
6+
#include <linux/types.h>
7+
#include <linux/bpf.h>
8+
#include <linux/bpf_local_storage.h>
9+
#include <uapi/linux/btf.h>
10+
#include <linux/btf_ids.h>
11+
12+
DEFINE_BPF_STORAGE_CACHE(cgroup_cache);
13+
14+
static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
15+
16+
/* Pin the task to the current CPU and mark this CPU's cgroup storage as
 * busy. Must be paired with bpf_cgrp_storage_unlock(). migrate_disable()
 * comes first so the per-CPU counter is incremented and decremented on the
 * same CPU.
 */
static void bpf_cgrp_storage_lock(void)
{
	migrate_disable();
	this_cpu_inc(bpf_cgrp_storage_busy);
}
21+
22+
/* Release the per-CPU busy marker taken by bpf_cgrp_storage_lock() or a
 * successful bpf_cgrp_storage_trylock(), then re-enable migration.
 */
static void bpf_cgrp_storage_unlock(void)
{
	this_cpu_dec(bpf_cgrp_storage_busy);
	migrate_enable();
}
27+
28+
/* Try to take the per-CPU busy marker. Returns false if this CPU is already
 * inside a cgroup-storage operation (counter was nonzero), which guards the
 * bpf helpers against re-entrancy on the same CPU, e.g. when a tracing prog
 * fires while a storage operation is in flight. On failure the counter and
 * migrate state are fully restored; on success the caller must later call
 * bpf_cgrp_storage_unlock().
 */
static bool bpf_cgrp_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
		this_cpu_dec(bpf_cgrp_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}
38+
39+
/* map_owner_storage_ptr callback: given an owner object (known to be a
 * struct cgroup for this map type), return the address of its embedded
 * bpf local storage pointer.
 */
static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
{
	return &((struct cgroup *)owner)->bpf_cgrp_storage;
}
45+
46+
/* Called when @cgroup is being destroyed: unlink every local storage
 * element attached to the cgroup. The bpf_local_storage struct itself is
 * freed via kfree_rcu() only when bpf_local_storage_unlink_nolock()
 * reports that this was the last user.
 */
void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
	struct bpf_local_storage *local_storage;
	bool free_cgroup_storage = false;
	unsigned long flags;

	rcu_read_lock();
	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
	if (!local_storage) {
		/* No storage was ever attached to this cgroup. */
		rcu_read_unlock();
		return;
	}

	/* Busy-mark this CPU so the bpf_cgrp_storage_get/delete helpers
	 * cannot re-enter while we unlink, then unlink all elements under
	 * the storage's own raw spinlock.
	 */
	bpf_cgrp_storage_lock();
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	free_cgroup_storage = bpf_local_storage_unlink_nolock(local_storage);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	bpf_cgrp_storage_unlock();
	rcu_read_unlock();

	if (free_cgroup_storage)
		kfree_rcu(local_storage, rcu);
}
69+
70+
/* Look up the storage element of @map attached to @cgroup.
 *
 * Returns the element's data descriptor, or NULL if @cgroup has no storage
 * for this map. @cacheit_lockit requests that the result be promoted into
 * the local-storage cache slot (requires locking inside
 * bpf_local_storage_lookup()). Callers must be in an RCU read-side
 * critical section or otherwise satisfy bpf_rcu_lock_held().
 */
static struct bpf_local_storage_data *
cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *cgroup_storage;
	struct bpf_local_storage_map *smap;

	cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage,
					       bpf_rcu_lock_held());
	if (!cgroup_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit);
}
84+
85+
/* Syscall-side map_lookup_elem: @key is a cgroup fd supplied by userspace.
 *
 * Resolves the fd to a cgroup reference, looks up this map's storage under
 * the busy lock, and returns a pointer to the stored value, or NULL if no
 * storage exists. A bad fd propagates as an ERR_PTR.
 */
static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return ERR_CAST(cgroup);

	bpf_cgrp_storage_lock();
	sdata = cgroup_storage_lookup(cgroup, map, true);
	bpf_cgrp_storage_unlock();
	/* Drop the reference taken by cgroup_get_from_fd(). */
	cgroup_put(cgroup);
	return sdata ? sdata->data : NULL;
}
102+
103+
/* Syscall-side map_update_elem: @key is a cgroup fd supplied by userspace.
 *
 * Creates or updates this map's storage on the cgroup with @value,
 * honoring @map_flags (BPF_ANY/BPF_NOEXIST/BPF_EXIST semantics are handled
 * by bpf_local_storage_update()). Returns 0 on success or a negative
 * errno. GFP_ATOMIC is used because the allocation happens while the
 * per-CPU busy lock is held with migration disabled.
 */
static int bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
					void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct cgroup *cgroup;
	int fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
					 value, map_flags, GFP_ATOMIC);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return PTR_ERR_OR_ZERO(sdata);
}
122+
123+
/* Unlink this map's storage element from @cgroup.
 *
 * Returns 0 on success or -ENOENT if no element exists. Callers hold the
 * per-CPU busy lock, hence cacheit_lockit=false for the lookup.
 */
static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = cgroup_storage_lookup(cgroup, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), true);
	return 0;
}
134+
135+
/* Syscall-side map_delete_elem: @key is a cgroup fd supplied by userspace.
 *
 * Resolves the fd to a cgroup reference and deletes this map's storage
 * element under the busy lock. Returns 0, -ENOENT if no element exists,
 * or a negative errno for a bad fd.
 */
static int bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err, fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return err;
}
151+
152+
/* Key iteration is not supported for cgroup local storage maps. */
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -ENOTSUPP;
}
156+
157+
/* map_alloc callback: create a local-storage map backed by the shared
 * cgroup storage cache.
 */
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &cgroup_cache);
}
161+
162+
/* map_free callback: tear down the local-storage map and release its
 * cgroup storage cache slot.
 */
static void cgroup_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &cgroup_cache, NULL);
}
166+
167+
/* bpf_cgrp_storage_get() helper: return a pointer to @map's storage value
 * on @cgroup, optionally creating it (zero- or @value-initialized) when
 * BPF_LOCAL_STORAGE_GET_F_CREATE is set in @flags. Returns NULL on invalid
 * flags, NULL cgroup, same-CPU re-entrancy (trylock failure), missing
 * storage without the CREATE flag, or allocation failure.
 *
 * *gfp_flags* is a hidden argument provided by the verifier
 */
BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!cgroup)
		return (unsigned long)NULL;

	if (!bpf_cgrp_storage_trylock())
		return (unsigned long)NULL;

	sdata = cgroup_storage_lookup(cgroup, map, true);
	if (sdata)
		goto unlock;

	/* only allocate new storage, when the cgroup is refcounted */
	if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
						 value, BPF_NOEXIST, gfp_flags);

unlock:
	bpf_cgrp_storage_unlock();
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
}
197+
198+
/* bpf_cgrp_storage_delete() helper: delete @map's storage element from
 * @cgroup. Returns 0 on success, -EINVAL for a NULL cgroup, -EBUSY on
 * same-CPU re-entrancy, or -ENOENT if no element exists.
 */
BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
{
	int ret;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!cgroup)
		return -EINVAL;

	if (!bpf_cgrp_storage_trylock())
		return -EBUSY;

	ret = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	return ret;
}
213+
214+
/* BTF ID of struct bpf_local_storage_map, referenced by .map_btf_id below. */
BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct, bpf_local_storage_map)

/* Map operations for BPF_MAP_TYPE_CGRP_STORAGE. Lookup/update/delete go
 * through the fd-keyed wrappers above; allocation checks and BTF checks
 * are shared with the generic bpf_local_storage implementation.
 */
const struct bpf_map_ops cgrp_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,	/* iteration unsupported */
	.map_lookup_elem = bpf_cgrp_storage_lookup_elem,
	.map_update_elem = bpf_cgrp_storage_update_elem,
	.map_delete_elem = bpf_cgrp_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &cgroup_storage_map_btf_ids[0],
	.map_owner_storage_ptr = cgroup_storage_ptr,
};
228+
229+
/* Verifier signature for bpf_cgrp_storage_get(map, cgroup, value, flags):
 * arg2 must be a BTF-typed pointer to struct cgroup; value may be NULL.
 */
const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
	.func = bpf_cgrp_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};
239+
240+
/* Verifier signature for bpf_cgrp_storage_delete(map, cgroup): arg2 must
 * be a BTF-typed pointer to struct cgroup; returns an integer error code.
 */
const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
	.func = bpf_cgrp_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_cgroup_btf_id[0],
};

kernel/bpf/helpers.c

+6
Original file line numberDiff line numberDiff line change
@@ -1663,6 +1663,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
16631663
return &bpf_dynptr_write_proto;
16641664
case BPF_FUNC_dynptr_data:
16651665
return &bpf_dynptr_data_proto;
1666+
#ifdef CONFIG_CGROUPS
1667+
case BPF_FUNC_cgrp_storage_get:
1668+
return &bpf_cgrp_storage_get_proto;
1669+
case BPF_FUNC_cgrp_storage_delete:
1670+
return &bpf_cgrp_storage_delete_proto;
1671+
#endif
16661672
default:
16671673
break;
16681674
}

0 commit comments

Comments
 (0)