@@ -39,6 +39,7 @@ struct bpf_local_storage;
 struct bpf_local_storage_map;
 struct kobject;
 struct mem_cgroup;
+struct bpf_func_state;

 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -117,6 +118,9 @@ struct bpf_map_ops {
                                           void *owner, u32 size);
        struct bpf_local_storage __rcu **(*map_owner_storage_ptr)(void *owner);

+       /* Misc helpers.*/
+       int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+
        /* map_meta_equal must be implemented for maps that can be
         * used as an inner map.  It is a runtime check to ensure
         * an inner map can be inserted to an outer map.
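For context: the new ->map_redirect() op gives each redirect-capable map type (such as DEVMAP, CPUMAP and XSKMAP) its own backend for the existing bpf_redirect_map() helper. A minimal XDP-side sketch of the caller, assuming a DEVMAP named tx_ports populated by userspace (names are illustrative, not from this patch):

/* Minimal XDP sketch (assumed map name and layout): bpf_redirect_map() is
 * the helper whose per-map-type backend is the ->map_redirect() op above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_example(struct xdp_md *ctx)
{
        __u32 key = ctx->ingress_ifindex % 64;

        /* The low bits of the flags act as the fallback action when the
         * map lookup fails; here we fall back to XDP_PASS.
         */
        return bpf_redirect_map(&tx_ports, key, XDP_PASS);
}

char _license[] SEC("license") = "GPL";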
@@ -129,6 +133,13 @@ struct bpf_map_ops {
        bool (*map_meta_equal)(const struct bpf_map *meta0,
                               const struct bpf_map *meta1);

+
+       int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+                                             struct bpf_func_state *caller,
+                                             struct bpf_func_state *callee);
+       int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+                                    void *callback_ctx, u64 flags);
+
        /* BTF name and id of struct allocated by map_alloc */
        const char * const map_btf_name;
        int *map_btf_id;
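For context: these two ops back the new bpf_for_each_map_elem() helper (its proto, bpf_for_each_map_elem_proto, is exported near the end of this patch). ->map_for_each_callback() drives the per-element iteration for a given map type, while ->map_set_for_each_callback_args() tells the verifier how the callback's arguments are typed. A minimal BPF-side sketch under assumed map and section names:

/* Minimal sketch (assumed names): sum all values of an ARRAY map by
 * iterating it with bpf_for_each_map_elem() and a callback.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 16);
        __type(key, __u32);
        __type(value, __u64);
} counters SEC(".maps");

struct cb_ctx {
        __u64 sum;
};

/* Callback contract: return 0 to continue, 1 to stop iterating early. */
static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
                      struct cb_ctx *data)
{
        data->sum += *val;
        return 0;
}

SEC("tc")
int sum_counters(struct __sk_buff *skb)
{
        struct cb_ctx data = { .sum = 0 };

        bpf_for_each_map_elem(&counters, sum_elem, &data, 0);
        return data.sum > 0;
}

char _license[] SEC("license") = "GPL";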
@@ -295,6 +306,8 @@ enum bpf_arg_type {
        ARG_CONST_ALLOC_SIZE_OR_ZERO,   /* number of allocated bytes requested */
        ARG_PTR_TO_BTF_ID_SOCK_COMMON,  /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
        ARG_PTR_TO_PERCPU_BTF_ID,       /* pointer to in-kernel percpu type */
+       ARG_PTR_TO_FUNC,        /* pointer to a bpf program function */
+       ARG_PTR_TO_STACK_OR_NULL,       /* pointer to stack or NULL */
        __BPF_ARG_TYPE_MAX,
 };

@@ -411,6 +424,8 @@ enum bpf_reg_type {
        PTR_TO_RDWR_BUF,         /* reg points to a read/write buffer */
        PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
        PTR_TO_PERCPU_BTF_ID,    /* reg points to a percpu kernel variable */
+       PTR_TO_FUNC,             /* reg points to a bpf program function */
+       PTR_TO_MAP_KEY,          /* reg points to a map element key */
 };

 /* The information passed from prog-specific *_is_valid_access
@@ -506,6 +521,11 @@ enum bpf_cgroup_storage_type {
  */
 #define MAX_BPF_FUNC_ARGS 12

+/* The maximum number of arguments passed through registers
+ * a single function may have.
+ */
+#define MAX_BPF_FUNC_REG_ARGS 5
+
 struct btf_func_model {
        u8 ret_size;
        u8 nr_args;
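Background: the BPF calling convention passes at most five arguments in registers R1-R5, which is the limit MAX_BPF_FUNC_REG_ARGS makes explicit. A hypothetical in-kernel helper proto (illustrative names only, not from this patch) shows the same cap from the helper side, where struct bpf_func_proto stops at arg5_type:

/* Hypothetical helper (illustrative names): at most five typed arguments
 * can be described, matching the five argument registers R1-R5.
 */
#include <linux/bpf.h>
#include <linux/filter.h>

BPF_CALL_2(bpf_example_sum, u64, a, u64, b)
{
        return a + b;
}

static const struct bpf_func_proto bpf_example_sum_proto = {
        .func           = bpf_example_sum,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
        .arg2_type      = ARG_ANYTHING,
        /* ...up to .arg5_type; there is no arg6_type. */
};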
@@ -1380,6 +1400,10 @@ void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
                                struct bpf_link_info *info);

+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+                                  struct bpf_func_state *caller,
+                                  struct bpf_func_state *callee);
+
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
@@ -1429,9 +1453,9 @@ struct btf *bpf_get_btf_vmlinux(void);
 /* Map specifics */
 struct xdp_buff;
 struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;

-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_flush(void);
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
@@ -1441,7 +1465,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog);
 bool dev_map_can_have_prog(struct bpf_map *map);

-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
@@ -1470,6 +1493,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr);
+int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+                               const union bpf_attr *kattr,
+                               union bpf_attr __user *uattr);
 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                    const struct bpf_prog *prog,
                    struct bpf_insn_access_aux *info);
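For context: bpf_prog_test_run_sk_lookup() is the kernel-side entry point that lets the BPF_PROG_TEST_RUN command exercise BPF_PROG_TYPE_SK_LOOKUP programs with a synthetic struct bpf_sk_lookup context. A hedged userspace sketch using current libbpf; the prog_fd and the context values are assumptions for illustration:

/* Hedged sketch (assumed setup): prog_fd refers to an already loaded
 * BPF_PROG_TYPE_SK_LOOKUP program; the test run feeds it a synthetic
 * lookup context instead of real traffic.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int test_run_sk_lookup(int prog_fd)
{
        struct bpf_sk_lookup ctx = {
                .family         = AF_INET,
                .protocol       = IPPROTO_TCP,
                .local_ip4      = htonl(INADDR_LOOPBACK), /* network byte order */
                .local_port     = 8080,                   /* host byte order */
        };
        DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
                .ctx_in         = &ctx,
                .ctx_size_in    = sizeof(ctx),
        );

        /* Returns 0 on success; the program's verdict ends up in opts.retval. */
        return bpf_prog_test_run_opts(prog_fd, &opts);
}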
@@ -1499,6 +1525,7 @@ struct bpf_prog *bpf_prog_by_id(u32 id);
 struct bpf_link *bpf_link_by_id(u32 id);

 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+void bpf_task_storage_free(struct task_struct *task);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -1568,17 +1595,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
        return -EOPNOTSUPP;
 }

-static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
-                                                      u32 key)
-{
-       return NULL;
-}
-
-static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
-                                                            u32 key)
-{
-       return NULL;
-}
 static inline bool dev_map_can_have_prog(struct bpf_map *map)
 {
        return false;
@@ -1590,6 +1606,7 @@ static inline void __dev_flush(void)

 struct xdp_buff;
 struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;

 static inline
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -1614,12 +1631,6 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
        return 0;
 }

-static inline
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-       return NULL;
-}
-
 static inline void __cpu_map_flush(void)
 {
 }
@@ -1670,6 +1681,13 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
        return -ENOTSUPP;
 }

+static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
+                                             const union bpf_attr *kattr,
+                                             union bpf_attr __user *uattr)
+{
+       return -ENOTSUPP;
+}
+
 static inline void bpf_map_put(struct bpf_map *map)
 {
 }
@@ -1684,6 +1702,10 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 {
        return NULL;
 }
+
+static inline void bpf_task_storage_free(struct task_struct *task)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */

 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
@@ -1768,22 +1790,24 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 }
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

-#if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
-                        struct bpf_prog *old, u32 which);
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
 void sock_map_unhash(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
+
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+                                      void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+                                      void *value, u64 map_flags);
 #else
-static inline int sock_map_prog_update(struct bpf_map *map,
-                                      struct bpf_prog *prog,
-                                      struct bpf_prog *old, u32 which)
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
 {
-       return -EOPNOTSUPP;
 }

+#ifdef CONFIG_BPF_SYSCALL
 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
                                       struct bpf_prog *prog)
 {
@@ -1801,20 +1825,7 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
 {
        return -EOPNOTSUPP;
 }
-#endif /* CONFIG_BPF_STREAM_PARSER */

-#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
-void bpf_sk_reuseport_detach(struct sock *sk);
-int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
-                                      void *value);
-int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
-                                      void *value, u64 map_flags);
-#else
-static inline void bpf_sk_reuseport_detach(struct sock *sk)
-{
-}
-
-#ifdef CONFIG_BPF_SYSCALL
 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
                                                     void *key, void *value)
 {
@@ -1886,6 +1897,9 @@ extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
 extern const struct bpf_func_proto bpf_sock_from_file_proto;
 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_proto;
+extern const struct bpf_func_proto bpf_for_each_map_elem_proto;

 const struct bpf_func_proto *bpf_tracing_func_proto(
        enum bpf_func_id func_id, const struct bpf_prog *prog);
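For context: the three protos exported here belong to the bpf_task_storage_get(), bpf_task_storage_delete() and bpf_for_each_map_elem() helpers. A minimal BPF-side task-storage sketch under assumed map, hook and field names:

/* Minimal sketch (assumed names): count execs per task in task-local
 * storage; the kernel reclaims the storage via bpf_task_storage_free()
 * when the task exits.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct task_val {
        __u64 nr_execs;
};

struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct task_val);
} task_store SEC(".maps");

SEC("lsm/bprm_committed_creds")
int count_exec(void *ctx)
{
        struct task_struct *task = bpf_get_current_task_btf();
        struct task_val *val;

        val = bpf_task_storage_get(&task_store, task, 0,
                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (val)
                __sync_fetch_and_add(&val->nr_execs, 1);
        return 0;
}

char _license[] SEC("license") = "GPL";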