Skip to content

Commit 904709f

Browse files
author
Alexei Starovoitov
committed
Merge branch 'bpf: Enable bpf_sk_storage for FENTRY/FEXIT/RAW_TP'
Martin KaFai Lau says: ==================== This set is to allow the FENTRY/FEXIT/RAW_TP tracing program to use bpf_sk_storage. The first two patches are cleanups. The last patch is tests. Patch 3 has the required kernel changes to enable bpf_sk_storage for FENTRY/FEXIT/RAW_TP. Please see the individual patches for details. v2: - Rename some of the function prefixes from sk_storage to bpf_sk_storage - Use a prefix check instead of a substring check ==================== Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents 0a58a65 + 53632e1 commit 904709f

File tree

6 files changed

+369
-32
lines changed

6 files changed

+369
-32
lines changed

include/net/bpf_sk_storage.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@ void bpf_sk_storage_free(struct sock *sk);
2020

2121
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
2222
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
23+
extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
24+
extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
2325

2426
struct bpf_local_storage_elem;
2527
struct bpf_sk_storage_diag;

kernel/trace/bpf_trace.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
#include <linux/syscalls.h>
1717
#include <linux/error-injection.h>
1818
#include <linux/btf_ids.h>
19+
#include <net/bpf_sk_storage.h>
1920

2021
#include <uapi/linux/bpf.h>
2122
#include <uapi/linux/btf.h>
@@ -1735,6 +1736,10 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
17351736
return &bpf_skc_to_tcp_request_sock_proto;
17361737
case BPF_FUNC_skc_to_udp6_sock:
17371738
return &bpf_skc_to_udp6_sock_proto;
1739+
case BPF_FUNC_sk_storage_get:
1740+
return &bpf_sk_storage_get_tracing_proto;
1741+
case BPF_FUNC_sk_storage_delete:
1742+
return &bpf_sk_storage_delete_tracing_proto;
17381743
#endif
17391744
case BPF_FUNC_seq_printf:
17401745
return prog->expected_attach_type == BPF_TRACE_ITER ?

net/core/bpf_sk_storage.c

Lines changed: 103 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include <linux/types.h>
77
#include <linux/spinlock.h>
88
#include <linux/bpf.h>
9+
#include <linux/btf.h>
910
#include <linux/btf_ids.h>
1011
#include <linux/bpf_local_storage.h>
1112
#include <net/bpf_sk_storage.h>
@@ -15,20 +16,8 @@
1516

1617
DEFINE_BPF_STORAGE_CACHE(sk_cache);
1718

18-
static int omem_charge(struct sock *sk, unsigned int size)
19-
{
20-
/* same check as in sock_kmalloc() */
21-
if (size <= sysctl_optmem_max &&
22-
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
23-
atomic_add(size, &sk->sk_omem_alloc);
24-
return 0;
25-
}
26-
27-
return -ENOMEM;
28-
}
29-
3019
static struct bpf_local_storage_data *
31-
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
20+
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
3221
{
3322
struct bpf_local_storage *sk_storage;
3423
struct bpf_local_storage_map *smap;
@@ -41,11 +30,11 @@ sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
4130
return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
4231
}
4332

44-
static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
33+
static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
4534
{
4635
struct bpf_local_storage_data *sdata;
4736

48-
sdata = sk_storage_lookup(sk, map, false);
37+
sdata = bpf_sk_storage_lookup(sk, map, false);
4938
if (!sdata)
5039
return -ENOENT;
5140

@@ -94,7 +83,7 @@ void bpf_sk_storage_free(struct sock *sk)
9483
kfree_rcu(sk_storage, rcu);
9584
}
9685

97-
static void sk_storage_map_free(struct bpf_map *map)
86+
static void bpf_sk_storage_map_free(struct bpf_map *map)
9887
{
9988
struct bpf_local_storage_map *smap;
10089

@@ -103,7 +92,7 @@ static void sk_storage_map_free(struct bpf_map *map)
10392
bpf_local_storage_map_free(smap);
10493
}
10594

106-
static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
95+
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
10796
{
10897
struct bpf_local_storage_map *smap;
10998

@@ -130,7 +119,7 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
130119
fd = *(int *)key;
131120
sock = sockfd_lookup(fd, &err);
132121
if (sock) {
133-
sdata = sk_storage_lookup(sock->sk, map, true);
122+
sdata = bpf_sk_storage_lookup(sock->sk, map, true);
134123
sockfd_put(sock);
135124
return sdata ? sdata->data : NULL;
136125
}
@@ -166,7 +155,7 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
166155
fd = *(int *)key;
167156
sock = sockfd_lookup(fd, &err);
168157
if (sock) {
169-
err = sk_storage_delete(sock->sk, map);
158+
err = bpf_sk_storage_del(sock->sk, map);
170159
sockfd_put(sock);
171160
return err;
172161
}
@@ -272,7 +261,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
272261
if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
273262
return (unsigned long)NULL;
274263

275-
sdata = sk_storage_lookup(sk, map, true);
264+
sdata = bpf_sk_storage_lookup(sk, map, true);
276265
if (sdata)
277266
return (unsigned long)sdata->data;
278267

@@ -305,30 +294,39 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
305294
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
306295
int err;
307296

308-
err = sk_storage_delete(sk, map);
297+
err = bpf_sk_storage_del(sk, map);
309298
sock_put(sk);
310299
return err;
311300
}
312301

313302
return -ENOENT;
314303
}
315304

316-
static int sk_storage_charge(struct bpf_local_storage_map *smap,
317-
void *owner, u32 size)
305+
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
306+
void *owner, u32 size)
318307
{
319-
return omem_charge(owner, size);
308+
struct sock *sk = (struct sock *)owner;
309+
310+
/* same check as in sock_kmalloc() */
311+
if (size <= sysctl_optmem_max &&
312+
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
313+
atomic_add(size, &sk->sk_omem_alloc);
314+
return 0;
315+
}
316+
317+
return -ENOMEM;
320318
}
321319

322-
static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
323-
void *owner, u32 size)
320+
static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
321+
void *owner, u32 size)
324322
{
325323
struct sock *sk = owner;
326324

327325
atomic_sub(size, &sk->sk_omem_alloc);
328326
}
329327

330328
static struct bpf_local_storage __rcu **
331-
sk_storage_ptr(void *owner)
329+
bpf_sk_storage_ptr(void *owner)
332330
{
333331
struct sock *sk = owner;
334332

@@ -339,18 +337,18 @@ static int sk_storage_map_btf_id;
339337
const struct bpf_map_ops sk_storage_map_ops = {
340338
.map_meta_equal = bpf_map_meta_equal,
341339
.map_alloc_check = bpf_local_storage_map_alloc_check,
342-
.map_alloc = sk_storage_map_alloc,
343-
.map_free = sk_storage_map_free,
340+
.map_alloc = bpf_sk_storage_map_alloc,
341+
.map_free = bpf_sk_storage_map_free,
344342
.map_get_next_key = notsupp_get_next_key,
345343
.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
346344
.map_update_elem = bpf_fd_sk_storage_update_elem,
347345
.map_delete_elem = bpf_fd_sk_storage_delete_elem,
348346
.map_check_btf = bpf_local_storage_map_check_btf,
349347
.map_btf_name = "bpf_local_storage_map",
350348
.map_btf_id = &sk_storage_map_btf_id,
351-
.map_local_storage_charge = sk_storage_charge,
352-
.map_local_storage_uncharge = sk_storage_uncharge,
353-
.map_owner_storage_ptr = sk_storage_ptr,
349+
.map_local_storage_charge = bpf_sk_storage_charge,
350+
.map_local_storage_uncharge = bpf_sk_storage_uncharge,
351+
.map_owner_storage_ptr = bpf_sk_storage_ptr,
354352
};
355353

356354
const struct bpf_func_proto bpf_sk_storage_get_proto = {
@@ -381,6 +379,79 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
381379
.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
382380
};
383381

382+
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
383+
{
384+
const struct btf *btf_vmlinux;
385+
const struct btf_type *t;
386+
const char *tname;
387+
u32 btf_id;
388+
389+
if (prog->aux->dst_prog)
390+
return false;
391+
392+
/* Ensure the tracing program is not tracing
393+
* any bpf_sk_storage*() function and also
394+
* use the bpf_sk_storage_(get|delete) helper.
395+
*/
396+
switch (prog->expected_attach_type) {
397+
case BPF_TRACE_RAW_TP:
398+
/* bpf_sk_storage has no trace point */
399+
return true;
400+
case BPF_TRACE_FENTRY:
401+
case BPF_TRACE_FEXIT:
402+
btf_vmlinux = bpf_get_btf_vmlinux();
403+
btf_id = prog->aux->attach_btf_id;
404+
t = btf_type_by_id(btf_vmlinux, btf_id);
405+
tname = btf_name_by_offset(btf_vmlinux, t->name_off);
406+
return !!strncmp(tname, "bpf_sk_storage",
407+
strlen("bpf_sk_storage"));
408+
default:
409+
return false;
410+
}
411+
412+
return false;
413+
}
414+
415+
BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
416+
void *, value, u64, flags)
417+
{
418+
if (!in_serving_softirq() && !in_task())
419+
return (unsigned long)NULL;
420+
421+
return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
422+
}
423+
424+
BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
425+
struct sock *, sk)
426+
{
427+
if (!in_serving_softirq() && !in_task())
428+
return -EPERM;
429+
430+
return ____bpf_sk_storage_delete(map, sk);
431+
}
432+
433+
const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
434+
.func = bpf_sk_storage_get_tracing,
435+
.gpl_only = false,
436+
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
437+
.arg1_type = ARG_CONST_MAP_PTR,
438+
.arg2_type = ARG_PTR_TO_BTF_ID,
439+
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
440+
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
441+
.arg4_type = ARG_ANYTHING,
442+
.allowed = bpf_sk_storage_tracing_allowed,
443+
};
444+
445+
const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
446+
.func = bpf_sk_storage_delete_tracing,
447+
.gpl_only = false,
448+
.ret_type = RET_INTEGER,
449+
.arg1_type = ARG_CONST_MAP_PTR,
450+
.arg2_type = ARG_PTR_TO_BTF_ID,
451+
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
452+
.allowed = bpf_sk_storage_tracing_allowed,
453+
};
454+
384455
struct bpf_sk_storage_diag {
385456
u32 nr_maps;
386457
struct bpf_map *maps[];

0 commit comments

Comments (0)