Skip to content

Commit d9d31cf

Browse files
fomichev (Stanislav Fomichev)
authored and
Alexei Starovoitov
committed
bpf: Use bpf_prog_run_array_cg_flags everywhere
Rename bpf_prog_run_array_cg_flags to bpf_prog_run_array_cg and use it everywhere. check_return_code already enforces sane return ranges for all cgroup types. (Only egress and bind hooks have non-canonical return ranges; the rest use [0, 1].)

No functional changes.

v2:
- 'func_ret & 1' under explicit test (Andrii & Martin)

Suggested-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Stanislav Fomichev <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
1 parent 246bdfa commit d9d31cf

File tree

2 files changed

+26
-54
lines changed

2 files changed

+26
-54
lines changed

include/linux/bpf-cgroup.h

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -225,24 +225,20 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
225225

226226
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
227227
({ \
228-
u32 __unused_flags; \
229228
int __ret = 0; \
230229
if (cgroup_bpf_enabled(atype)) \
231230
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
232-
NULL, \
233-
&__unused_flags); \
231+
NULL, NULL); \
234232
__ret; \
235233
})
236234

237235
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
238236
({ \
239-
u32 __unused_flags; \
240237
int __ret = 0; \
241238
if (cgroup_bpf_enabled(atype)) { \
242239
lock_sock(sk); \
243240
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
244-
t_ctx, \
245-
&__unused_flags); \
241+
t_ctx, NULL); \
246242
release_sock(sk); \
247243
} \
248244
__ret; \

kernel/bpf/cgroup.c

Lines changed: 24 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -25,50 +25,18 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
2525
/* __always_inline is necessary to prevent indirect call through run_prog
2626
* function pointer.
2727
*/
28-
static __always_inline int
29-
bpf_prog_run_array_cg_flags(const struct cgroup_bpf *cgrp,
30-
enum cgroup_bpf_attach_type atype,
31-
const void *ctx, bpf_prog_run_fn run_prog,
32-
int retval, u32 *ret_flags)
33-
{
34-
const struct bpf_prog_array_item *item;
35-
const struct bpf_prog *prog;
36-
const struct bpf_prog_array *array;
37-
struct bpf_run_ctx *old_run_ctx;
38-
struct bpf_cg_run_ctx run_ctx;
39-
u32 func_ret;
40-
41-
run_ctx.retval = retval;
42-
migrate_disable();
43-
rcu_read_lock();
44-
array = rcu_dereference(cgrp->effective[atype]);
45-
item = &array->items[0];
46-
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
47-
while ((prog = READ_ONCE(item->prog))) {
48-
run_ctx.prog_item = item;
49-
func_ret = run_prog(prog, ctx);
50-
if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
51-
run_ctx.retval = -EPERM;
52-
*(ret_flags) |= (func_ret >> 1);
53-
item++;
54-
}
55-
bpf_reset_run_ctx(old_run_ctx);
56-
rcu_read_unlock();
57-
migrate_enable();
58-
return run_ctx.retval;
59-
}
60-
6128
static __always_inline int
6229
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
6330
enum cgroup_bpf_attach_type atype,
6431
const void *ctx, bpf_prog_run_fn run_prog,
65-
int retval)
32+
int retval, u32 *ret_flags)
6633
{
6734
const struct bpf_prog_array_item *item;
6835
const struct bpf_prog *prog;
6936
const struct bpf_prog_array *array;
7037
struct bpf_run_ctx *old_run_ctx;
7138
struct bpf_cg_run_ctx run_ctx;
39+
u32 func_ret;
7240

7341
run_ctx.retval = retval;
7442
migrate_disable();
@@ -78,7 +46,12 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
7846
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
7947
while ((prog = READ_ONCE(item->prog))) {
8048
run_ctx.prog_item = item;
81-
if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
49+
func_ret = run_prog(prog, ctx);
50+
if (ret_flags) {
51+
*(ret_flags) |= (func_ret >> 1);
52+
func_ret &= 1;
53+
}
54+
if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
8255
run_ctx.retval = -EPERM;
8356
item++;
8457
}
@@ -1144,9 +1117,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
11441117
u32 flags = 0;
11451118
bool cn;
11461119

1147-
ret = bpf_prog_run_array_cg_flags(
1148-
&cgrp->bpf, atype,
1149-
skb, __bpf_prog_run_save_cb, 0, &flags);
1120+
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
1121+
__bpf_prog_run_save_cb, 0, &flags);
11501122

11511123
/* Return values of CGROUP EGRESS BPF programs are:
11521124
* 0: drop packet
@@ -1172,7 +1144,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
11721144
ret = (cn ? NET_XMIT_DROP : ret);
11731145
} else {
11741146
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
1175-
skb, __bpf_prog_run_save_cb, 0);
1147+
skb, __bpf_prog_run_save_cb, 0,
1148+
NULL);
11761149
if (ret && !IS_ERR_VALUE((long)ret))
11771150
ret = -EFAULT;
11781151
}
@@ -1202,7 +1175,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
12021175
{
12031176
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
12041177

1205-
return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0);
1178+
return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
1179+
NULL);
12061180
}
12071181
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
12081182

@@ -1247,8 +1221,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
12471221
}
12481222

12491223
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1250-
return bpf_prog_run_array_cg_flags(&cgrp->bpf, atype,
1251-
&ctx, bpf_prog_run, 0, flags);
1224+
return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
1225+
0, flags);
12521226
}
12531227
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
12541228

@@ -1275,7 +1249,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
12751249
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
12761250

12771251
return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
1278-
0);
1252+
0, NULL);
12791253
}
12801254
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
12811255

@@ -1292,7 +1266,8 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
12921266

12931267
rcu_read_lock();
12941268
cgrp = task_dfl_cgroup(current);
1295-
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0);
1269+
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1270+
NULL);
12961271
rcu_read_unlock();
12971272

12981273
return ret;
@@ -1457,7 +1432,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
14571432

14581433
rcu_read_lock();
14591434
cgrp = task_dfl_cgroup(current);
1460-
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0);
1435+
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
1436+
NULL);
14611437
rcu_read_unlock();
14621438

14631439
kfree(ctx.cur_val);
@@ -1550,7 +1526,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
15501526

15511527
lock_sock(sk);
15521528
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
1553-
&ctx, bpf_prog_run, 0);
1529+
&ctx, bpf_prog_run, 0, NULL);
15541530
release_sock(sk);
15551531

15561532
if (ret)
@@ -1650,7 +1626,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
16501626

16511627
lock_sock(sk);
16521628
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1653-
&ctx, bpf_prog_run, retval);
1629+
&ctx, bpf_prog_run, retval, NULL);
16541630
release_sock(sk);
16551631

16561632
if (ret < 0)
@@ -1699,7 +1675,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
16991675
*/
17001676

17011677
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
1702-
&ctx, bpf_prog_run, retval);
1678+
&ctx, bpf_prog_run, retval, NULL);
17031679
if (ret < 0)
17041680
return ret;
17051681

0 commit comments

Comments (0)