@@ -25,50 +25,18 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
 /* __always_inline is necessary to prevent indirect call through run_prog
  * function pointer.
  */
-static __always_inline int
-bpf_prog_run_array_cg_flags(const struct cgroup_bpf *cgrp,
-			    enum cgroup_bpf_attach_type atype,
-			    const void *ctx, bpf_prog_run_fn run_prog,
-			    int retval, u32 *ret_flags)
-{
-	const struct bpf_prog_array_item *item;
-	const struct bpf_prog *prog;
-	const struct bpf_prog_array *array;
-	struct bpf_run_ctx *old_run_ctx;
-	struct bpf_cg_run_ctx run_ctx;
-	u32 func_ret;
-
-	run_ctx.retval = retval;
-	migrate_disable();
-	rcu_read_lock();
-	array = rcu_dereference(cgrp->effective[atype]);
-	item = &array->items[0];
-	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
-	while ((prog = READ_ONCE(item->prog))) {
-		run_ctx.prog_item = item;
-		func_ret = run_prog(prog, ctx);
-		if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
-			run_ctx.retval = -EPERM;
-		*(ret_flags) |= (func_ret >> 1);
-		item++;
-	}
-	bpf_reset_run_ctx(old_run_ctx);
-	rcu_read_unlock();
-	migrate_enable();
-	return run_ctx.retval;
-}
-
 static __always_inline int
 bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
 		      enum cgroup_bpf_attach_type atype,
 		      const void *ctx, bpf_prog_run_fn run_prog,
-		      int retval)
+		      int retval, u32 *ret_flags)
 {
 	const struct bpf_prog_array_item *item;
 	const struct bpf_prog *prog;
 	const struct bpf_prog_array *array;
 	struct bpf_run_ctx *old_run_ctx;
 	struct bpf_cg_run_ctx run_ctx;
+	u32 func_ret;
 
 	run_ctx.retval = retval;
 	migrate_disable();
@@ -78,7 +46,12 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	while ((prog = READ_ONCE(item->prog))) {
 		run_ctx.prog_item = item;
-		if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
+		func_ret = run_prog(prog, ctx);
+		if (ret_flags) {
+			*(ret_flags) |= (func_ret >> 1);
+			func_ret &= 1;
+		}
+		if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
 			run_ctx.retval = -EPERM;
 		item++;
 	}
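
(Not part of the patch: a minimal, standalone sketch of the verdict/flags convention the merged bpf_prog_run_array_cg() implements above — bit 0 of each program's return value is the allow/deny verdict, and the remaining bits are OR-ed into *ret_flags only when the caller passed a non-NULL pointer. The helper name split_prog_ret() is hypothetical and exists purely for illustration.)

#include <stdint.h>
#include <stdio.h>

/* Illustration only: mimic how bpf_prog_run_array_cg() splits a program's
 * u32 return value when the caller asked for flags (ret_flags != NULL).
 */
static int split_prog_ret(uint32_t func_ret, uint32_t *ret_flags)
{
	if (ret_flags) {
		*ret_flags |= (func_ret >> 1);	/* upper bits are flags, e.g. set-CN */
		func_ret &= 1;			/* keep only the verdict bit */
	}
	return func_ret ? 0 : -1;	/* 0 = allowed; the kernel uses -EPERM on deny */
}

int main(void)
{
	uint32_t flags = 0;

	/* An egress program returning 3 means "keep packet and set congestion notification". */
	printf("verdict=%d flags=0x%x\n", split_prog_ret(3, &flags), flags);
	/* Callers that do not care about flags simply pass NULL. */
	printf("verdict=%d\n", split_prog_ret(0, NULL));
	return 0;
}
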
@@ -1144,9 +1117,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 		u32 flags = 0;
 		bool cn;
 
-		ret = bpf_prog_run_array_cg_flags(
-			&cgrp->bpf, atype,
-			skb, __bpf_prog_run_save_cb, 0, &flags);
+		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
+					    __bpf_prog_run_save_cb, 0, &flags);
 
 		/* Return values of CGROUP EGRESS BPF programs are:
 		 * 0: drop packet
@@ -1172,7 +1144,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 		ret = (cn ? NET_XMIT_DROP : ret);
 	} else {
 		ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
-					    skb, __bpf_prog_run_save_cb, 0);
+					    skb, __bpf_prog_run_save_cb, 0,
+					    NULL);
 		if (ret && !IS_ERR_VALUE((long)ret))
 			ret = -EFAULT;
 	}
@@ -1202,7 +1175,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
 
-	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0);
+	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
+				     NULL);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
 
@@ -1247,8 +1221,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 	}
 
 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	return bpf_prog_run_array_cg_flags(&cgrp->bpf, atype,
-					   &ctx, bpf_prog_run, 0, flags);
+	return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
+				     0, flags);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
 
@@ -1275,7 +1249,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
 
 	return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
-				     0);
+				     0, NULL);
 }
 EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
 
@@ -1292,7 +1266,8 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
 
 	rcu_read_lock();
 	cgrp = task_dfl_cgroup(current);
-	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0);
+	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
+				    NULL);
 	rcu_read_unlock();
 
 	return ret;
@@ -1457,7 +1432,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
 
 	rcu_read_lock();
 	cgrp = task_dfl_cgroup(current);
-	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0);
+	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
+				    NULL);
 	rcu_read_unlock();
 
 	kfree(ctx.cur_val);
@@ -1550,7 +1526,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 
 	lock_sock(sk);
 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
-				    &ctx, bpf_prog_run, 0);
+				    &ctx, bpf_prog_run, 0, NULL);
 	release_sock(sk);
 
 	if (ret)
@@ -1650,7 +1626,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 
 	lock_sock(sk);
 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
-				    &ctx, bpf_prog_run, retval);
+				    &ctx, bpf_prog_run, retval, NULL);
 	release_sock(sk);
 
 	if (ret < 0)
@@ -1699,7 +1675,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
 	 */
 
 	ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
-				    &ctx, bpf_prog_run, retval);
+				    &ctx, bpf_prog_run, retval, NULL);
 	if (ret < 0)
 		return ret;
 
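
(Also not part of the patch, just a usage note: after the merge, every caller goes through the single helper. Callers that need the extra flag bits — the egress skb path and __cgroup_bpf_run_filter_sock_addr() — pass a u32 *ret_flags to collect them, while the remaining call sites pass NULL and get the plain allow/deny result. Both forms are taken from the call sites updated above:

	u32 flags = 0;
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
				    __bpf_prog_run_save_cb, 0, &flags);	/* wants flags */
	ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
				    NULL);					/* verdict only */
)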