static void test_add(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__add__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(add)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.add.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
	ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
@@ -31,27 +27,20 @@ static void test_add(struct atomics_lskel *skel)
	ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");

	ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
-
-cleanup:
-	close(link_fd);
}

static void test_sub(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__sub__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.sub.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
	ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
@@ -63,27 +52,20 @@ static void test_sub(struct atomics_lskel *skel)
	ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");

	ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
-
-cleanup:
-	close(link_fd);
}

static void test_and(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__and__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(and)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.and.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
	ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
@@ -92,26 +74,20 @@ static void test_and(struct atomics_lskel *skel)
	ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");

	ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
-cleanup:
-	close(link_fd);
}

static void test_or(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__or__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(or)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.or.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
	ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
@@ -120,26 +96,20 @@ static void test_or(struct atomics_lskel *skel)
	ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");

	ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
-cleanup:
-	close(link_fd);
}

static void test_xor(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__xor__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.xor.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
	ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
@@ -148,26 +118,20 @@ static void test_xor(struct atomics_lskel *skel)
	ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");

	ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value");
-cleanup:
-	close(link_fd);
}

static void test_cmpxchg(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__cmpxchg__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.cmpxchg.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
	ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
@@ -176,45 +140,34 @@ static void test_cmpxchg(struct atomics_lskel *skel)
	ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value");
	ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
	ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
-
-cleanup:
-	close(link_fd);
}

static void test_xchg(struct atomics_lskel *skel)
{
	int err, prog_fd;
-	int link_fd;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

-	link_fd = atomics_lskel__xchg__attach(skel);
-	if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
-		return;
-
+	/* No need to attach it, just run it directly */
	prog_fd = skel->progs.xchg.prog_fd;
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (!ASSERT_OK(err, "test_run_opts err"))
-		goto cleanup;
+		return;
	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
-		goto cleanup;
+		return;

	ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
	ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");

	ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
	ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
-
-cleanup:
-	close(link_fd);
}

void test_atomics(void)
{
	struct atomics_lskel *skel;
-	__u32 duration = 0;

	skel = atomics_lskel__open_and_load();
-	if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
+	if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
		return;

	if (skel->data->skip_tests) {
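Note (not part of the commit): the pattern this change relies on is that a program run through BPF_PROG_TEST_RUN only has to be loaded, not attached, so the link fd and the cleanup label can be dropped. Below is a minimal sketch of that load-then-run sequence under assumed names: the light skeleton "foo", its program "bar", and the test function are hypothetical, while LIBBPF_OPTS, bpf_prog_test_run_opts() and the ASSERT_* helpers are the same ones used in the diff above.

/* Hypothetical light skeleton "foo" with one test-runnable program "bar".
 * Only the load/run sequence mirrors the real atomics test above.
 */
#include <test_progs.h>
#include "foo.lskel.h"	/* hypothetical generated light skeleton header */

static void run_bar_without_attaching(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct foo_lskel *skel;
	int err;

	skel = foo_lskel__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skeleton load"))
		return;

	/* No attach step: run the loaded program once via its fd. */
	err = bpf_prog_test_run_opts(skel->progs.bar.prog_fd, &topts);
	ASSERT_OK(err, "test_run_opts err");
	ASSERT_OK(topts.retval, "test_run_opts retval");

	foo_lskel__destroy(skel);
}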