@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/btf_ids.h>
 #include <linux/bpf_local_storage.h>
 #include <net/bpf_sk_storage.h>
@@ -15,20 +16,8 @@
 
 DEFINE_BPF_STORAGE_CACHE(sk_cache);
 
-static int omem_charge(struct sock *sk, unsigned int size)
-{
-	/* same check as in sock_kmalloc() */
-	if (size <= sysctl_optmem_max &&
-	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
-		atomic_add(size, &sk->sk_omem_alloc);
-		return 0;
-	}
-
-	return -ENOMEM;
-}
-
 static struct bpf_local_storage_data *
-sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
+bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
 {
 	struct bpf_local_storage *sk_storage;
 	struct bpf_local_storage_map *smap;
@@ -41,11 +30,11 @@ sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
 	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
 }
 
-static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
+static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
 {
 	struct bpf_local_storage_data *sdata;
 
-	sdata = sk_storage_lookup(sk, map, false);
+	sdata = bpf_sk_storage_lookup(sk, map, false);
 	if (!sdata)
 		return -ENOENT;
 
@@ -94,7 +83,7 @@ void bpf_sk_storage_free(struct sock *sk)
 	kfree_rcu(sk_storage, rcu);
 }
 
-static void sk_storage_map_free(struct bpf_map *map)
+static void bpf_sk_storage_map_free(struct bpf_map *map)
 {
 	struct bpf_local_storage_map *smap;
 
@@ -103,7 +92,7 @@ static void sk_storage_map_free(struct bpf_map *map)
 	bpf_local_storage_map_free(smap);
 }
 
-static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
+static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_local_storage_map *smap;
 
@@ -130,7 +119,7 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
 	fd = *(int *)key;
 	sock = sockfd_lookup(fd, &err);
 	if (sock) {
-		sdata = sk_storage_lookup(sock->sk, map, true);
+		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
 		sockfd_put(sock);
 		return sdata ? sdata->data : NULL;
 	}
@@ -166,7 +155,7 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
 	fd = *(int *)key;
 	sock = sockfd_lookup(fd, &err);
 	if (sock) {
-		err = sk_storage_del(sock->sk, map);
+		err = bpf_sk_storage_del(sock->sk, map);
 		sockfd_put(sock);
 		return err;
 	}
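
These two fd-based ops back the userspace syscall path: from userspace, a
BPF_MAP_TYPE_SK_STORAGE map is keyed by a socket file descriptor. A minimal
sketch using libbpf's syscall wrappers; the fds and value buffer are
assumptions for illustration, not part of this patch:

	#include <bpf/bpf.h>

	/* Look up, then drop, the storage this map holds for one socket.
	 * map_fd refers to a BPF_MAP_TYPE_SK_STORAGE map, sock_fd to an
	 * open socket; value must be at least the map's value_size.
	 */
	static int read_then_del(int map_fd, int sock_fd, void *value)
	{
		int err;

		/* reaches bpf_fd_sk_storage_lookup_elem() above */
		err = bpf_map_lookup_elem(map_fd, &sock_fd, value);
		if (err)
			return err;

		/* reaches bpf_fd_sk_storage_delete_elem() above */
		return bpf_map_delete_elem(map_fd, &sock_fd);
	}
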
@@ -272,7 +261,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
 	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
 		return (unsigned long)NULL;
 
-	sdata = sk_storage_lookup(sk, map, true);
+	sdata = bpf_sk_storage_lookup(sk, map, true);
 	if (sdata)
 		return (unsigned long)sdata->data;
 
@@ -305,30 +294,39 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
 	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
 		int err;
 
-		err = sk_storage_delete(sk, map);
+		err = bpf_sk_storage_del(sk, map);
 		sock_put(sk);
 		return err;
 	}
 
 	return -ENOENT;
 }
 
-static int sk_storage_charge(struct bpf_local_storage_map *smap,
-			     void *owner, u32 size)
+static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
+				 void *owner, u32 size)
 {
-	return omem_charge(owner, size);
+	struct sock *sk = (struct sock *)owner;
+
+	/* same check as in sock_kmalloc() */
+	if (size <= sysctl_optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+		atomic_add(size, &sk->sk_omem_alloc);
+		return 0;
+	}
+
+	return -ENOMEM;
 }
 
-static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
-				void *owner, u32 size)
+static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
+				    void *owner, u32 size)
 {
 	struct sock *sk = owner;
 
 	atomic_sub(size, &sk->sk_omem_alloc);
 }
 
 static struct bpf_local_storage __rcu **
-sk_storage_ptr(void *owner)
+bpf_sk_storage_ptr(void *owner)
 {
 	struct sock *sk = owner;
 
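
The charge/uncharge callbacks are invoked by the generic bpf_local_storage
code through the map ops whenever a storage element is allocated or freed.
A simplified sketch of that dispatch, using a hypothetical helper name
rather than the actual bpf_local_storage.c internals:

	/* Hypothetical sketch of the generic side: before allocating
	 * size bytes of storage for owner, consult the owner-specific
	 * charge callback (bpf_sk_storage_charge() for sockets).
	 */
	static int mem_charge_sketch(struct bpf_local_storage_map *smap,
				     void *owner, u32 size)
	{
		struct bpf_map *map = &smap->map;

		if (!map->ops->map_local_storage_charge)
			return 0;

		return map->ops->map_local_storage_charge(smap, owner, size);
	}
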
@@ -339,18 +337,18 @@ static int sk_storage_map_btf_id;
 const struct bpf_map_ops sk_storage_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
 	.map_alloc_check = bpf_local_storage_map_alloc_check,
-	.map_alloc = sk_storage_map_alloc,
-	.map_free = sk_storage_map_free,
+	.map_alloc = bpf_sk_storage_map_alloc,
+	.map_free = bpf_sk_storage_map_free,
 	.map_get_next_key = notsupp_get_next_key,
 	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
 	.map_update_elem = bpf_fd_sk_storage_update_elem,
 	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
 	.map_check_btf = bpf_local_storage_map_check_btf,
 	.map_btf_name = "bpf_local_storage_map",
 	.map_btf_id = &sk_storage_map_btf_id,
-	.map_local_storage_charge = sk_storage_charge,
-	.map_local_storage_uncharge = sk_storage_uncharge,
-	.map_owner_storage_ptr = sk_storage_ptr,
+	.map_local_storage_charge = bpf_sk_storage_charge,
+	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
+	.map_owner_storage_ptr = bpf_sk_storage_ptr,
 };
 
 const struct bpf_func_proto bpf_sk_storage_get_proto = {
@@ -381,6 +379,79 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
 	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
 };
 
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+	const struct btf *btf_vmlinux;
+	const struct btf_type *t;
+	const char *tname;
+	u32 btf_id;
+
+	if (prog->aux->dst_prog)
+		return false;
+
+	/* Ensure the tracing program is not tracing
+	 * any bpf_sk_storage*() function while also
+	 * using the bpf_sk_storage_(get|delete) helpers.
+	 */
+	switch (prog->expected_attach_type) {
+	case BPF_TRACE_RAW_TP:
+		/* bpf_sk_storage has no trace point */
+		return true;
+	case BPF_TRACE_FENTRY:
+	case BPF_TRACE_FEXIT:
+		btf_vmlinux = bpf_get_btf_vmlinux();
+		btf_id = prog->aux->attach_btf_id;
+		t = btf_type_by_id(btf_vmlinux, btf_id);
+		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+		return !!strncmp(tname, "bpf_sk_storage",
+				 strlen("bpf_sk_storage"));
+	default:
+		return false;
+	}
+
+	return false;
+}
+
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+	   void *, value, u64, flags)
+{
+	if (!in_serving_softirq() && !in_task())
+		return (unsigned long)NULL;
+
+	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+	   struct sock *, sk)
+{
+	if (!in_serving_softirq() && !in_task())
+		return -EPERM;
+
+	return ____bpf_sk_storage_delete(map, sk);
+}
+
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+	.func		= bpf_sk_storage_get_tracing,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg4_type	= ARG_ANYTHING,
+	.allowed	= bpf_sk_storage_tracing_allowed,
+};
+
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+	.func		= bpf_sk_storage_delete_tracing,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+	.allowed	= bpf_sk_storage_tracing_allowed,
+};
+
 struct bpf_sk_storage_diag {
 	u32 nr_maps;
 	struct bpf_map *maps[];
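
With the tracing protos above in place, an fentry/fexit or raw_tp program can
keep per-socket state. A minimal sketch of such a program; the tcp_close
attach point, map name, and value layout are illustrative assumptions, not
part of this patch:

	// SPDX-License-Identifier: GPL-2.0
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	struct {
		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, __u64);
	} sk_close_cnt SEC(".maps");

	/* fentry on tcp_close passes bpf_sk_storage_tracing_allowed():
	 * the traced name does not start with "bpf_sk_storage", and the
	 * hook runs in task context, satisfying the in_task() check in
	 * bpf_sk_storage_get_tracing().
	 */
	SEC("fentry/tcp_close")
	int BPF_PROG(count_close, struct sock *sk)
	{
		__u64 *cnt;

		cnt = bpf_sk_storage_get(&sk_close_cnt, sk, 0,
					 BPF_SK_STORAGE_GET_F_CREATE);
		if (cnt)
			(*cnt)++;
		return 0;
	}

	char _license[] SEC("license") = "GPL";

The same program attached as fentry to bpf_sk_storage_free() would be
rejected by bpf_sk_storage_tracing_allowed(), since the traced function name
matches the "bpf_sk_storage" prefix; extension programs (prog->aux->dst_prog
set) are rejected outright.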