 #include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

+/* open-coded version */
+struct bpf_iter_kmem_cache {
+	__u64 __opaque[1];
+} __attribute__((aligned(8)));
+
+struct bpf_iter_kmem_cache_kern {
+	struct kmem_cache *pos;
+} __attribute__((aligned(8)));
+
+#define KMEM_CACHE_POS_START	((void *)1L)
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
+{
+	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
+
+	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
+	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));
+
+	kit->pos = KMEM_CACHE_POS_START;
+	return 0;
+}
+
+__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
+{
+	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
+	struct kmem_cache *prev = kit->pos;
+	struct kmem_cache *next;
+	bool destroy = false;
+
+	if (!prev)
+		return NULL;
+
+	mutex_lock(&slab_mutex);
+
+	if (list_empty(&slab_caches)) {
+		mutex_unlock(&slab_mutex);
+		return NULL;
+	}
+
+	if (prev == KMEM_CACHE_POS_START)
+		next = list_first_entry(&slab_caches, struct kmem_cache, list);
+	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
+		next = NULL;
+	else
+		next = list_next_entry(prev, list);
+
+	/* boot_caches have negative refcount, don't touch them */
+	if (next && next->refcount > 0)
+		next->refcount++;
+
+	/* Skip kmem_cache_destroy() for active entries */
+	if (prev && prev != KMEM_CACHE_POS_START) {
+		if (prev->refcount > 1)
+			prev->refcount--;
+		else if (prev->refcount == 1)
+			destroy = true;
+	}
+
+	mutex_unlock(&slab_mutex);
+
+	if (destroy)
+		kmem_cache_destroy(prev);
+
+	kit->pos = next;
+	return next;
+}
+
+__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
+{
+	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
+	struct kmem_cache *s = kit->pos;
+	bool destroy = false;
+
+	if (s == NULL || s == KMEM_CACHE_POS_START)
+		return;
+
+	mutex_lock(&slab_mutex);
+
+	/* Skip kmem_cache_destroy() for active entries */
+	if (s->refcount > 1)
+		s->refcount--;
+	else if (s->refcount == 1)
+		destroy = true;
+
+	mutex_unlock(&slab_mutex);
+
+	if (destroy)
+		kmem_cache_destroy(s);
+}
+
+__bpf_kfunc_end_defs();
+
 struct bpf_iter__kmem_cache {
 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
 	__bpf_md_ptr(struct kmem_cache *, s);
 };

+union kmem_cache_iter_priv {
+	struct bpf_iter_kmem_cache it;
+	struct bpf_iter_kmem_cache_kern kit;
+};
+
 static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	loff_t cnt = 0;
 	bool found = false;
 	struct kmem_cache *s;
+	union kmem_cache_iter_priv *p = seq->private;

 	mutex_lock(&slab_mutex);

@@ -43,8 +143,9 @@ static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
 	mutex_unlock(&slab_mutex);

 	if (!found)
-		return NULL;
+		s = NULL;

+	p->kit.pos = s;
 	return s;
 }

@@ -55,63 +156,24 @@ static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
 		.meta = &meta,
 		.s = v,
 	};
+	union kmem_cache_iter_priv *p = seq->private;
 	struct bpf_prog *prog;
-	bool destroy = false;

 	meta.seq = seq;
 	prog = bpf_iter_get_info(&meta, true);
 	if (prog && !ctx.s)
 		bpf_iter_run_prog(prog, &ctx);

-	if (ctx.s == NULL)
-		return;
-
-	mutex_lock(&slab_mutex);
-
-	/* Skip kmem_cache_destroy() for active entries */
-	if (ctx.s->refcount > 1)
-		ctx.s->refcount--;
-	else if (ctx.s->refcount == 1)
-		destroy = true;
-
-	mutex_unlock(&slab_mutex);
-
-	if (destroy)
-		kmem_cache_destroy(ctx.s);
+	bpf_iter_kmem_cache_destroy(&p->it);
 }

 static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct kmem_cache *s = v;
-	struct kmem_cache *next = NULL;
-	bool destroy = false;
+	union kmem_cache_iter_priv *p = seq->private;

 	++*pos;

-	mutex_lock(&slab_mutex);
-
-	if (list_last_entry(&slab_caches, struct kmem_cache, list) != s) {
-		next = list_next_entry(s, list);
-
-		WARN_ON_ONCE(next->refcount == 0);
-
-		/* boot_caches have negative refcount, don't touch them */
-		if (next->refcount > 0)
-			next->refcount++;
-	}
-
-	/* Skip kmem_cache_destroy() for active entries */
-	if (s->refcount > 1)
-		s->refcount--;
-	else if (s->refcount == 1)
-		destroy = true;
-
-	mutex_unlock(&slab_mutex);
-
-	if (destroy)
-		kmem_cache_destroy(s);
-
-	return next;
+	return bpf_iter_kmem_cache_next(&p->it);
 }

 static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
@@ -143,6 +205,7 @@ BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

 static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
 	.seq_ops		= &kmem_cache_iter_seq_ops,
+	.seq_priv_size		= sizeof(union kmem_cache_iter_priv),
 };

 static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
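The diff above only touches the kernel side: it adds the `bpf_iter_kmem_cache_new/next/destroy` kfuncs and reuses them from the existing seq_file iterator. As a rough illustration of how a BPF program might consume the new open-coded iterator, here is a minimal sketch; the section, program name, global counter, and the `__ksym` declarations are illustrative assumptions and are not part of this commit.

```c
// SPDX-License-Identifier: GPL-2.0
/* Hypothetical usage sketch, not part of this commit. Assumes a vmlinux.h
 * build environment on a kernel that exports the bpf_iter_kmem_cache kfuncs.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* Open-coded iterator kfuncs added by the patch above */
extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;

int nr_caches;

/* Sleepable program type, since the next/destroy kfuncs take slab_mutex */
SEC("syscall")
int count_kmem_caches(void *ctx)
{
	struct bpf_iter_kmem_cache it;
	struct kmem_cache *s;

	/* new/next/destroy follow the usual open-coded iterator contract */
	if (bpf_iter_kmem_cache_new(&it))
		return 0;

	while ((s = bpf_iter_kmem_cache_next(&it)) != NULL)
		nr_caches++;

	bpf_iter_kmem_cache_destroy(&it);
	return 0;
}
```

Selftests typically hide this new/next/destroy sequence behind a `bpf_for_each()`-style macro; the calls are spelled out here only to mirror the kfuncs introduced in the diff.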