@@ -32,17 +32,6 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
 #define IORING_MAX_FIXED_FILES	(1U << 20)
 #define IORING_MAX_REG_BUFFERS	(1U << 14)
 
-static const struct io_mapped_ubuf dummy_ubuf = {
-	/* set invalid range, so io_import_fixed() fails meeting it */
-	.ubuf = -1UL,
-	.len = UINT_MAX,
-};
-
-const struct io_rsrc_node empty_node = {
-	.type = IORING_RSRC_BUFFER,
-	.buf = (struct io_mapped_ubuf *) &dummy_ubuf,
-};
-
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
@@ -116,7 +105,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 {
 	unsigned int i;
 
-	if (node->buf != &dummy_ubuf) {
+	if (node->buf) {
 		struct io_mapped_ubuf *imu = node->buf;
 
 		if (!refcount_dec_and_test(&imu->refs))
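The two hunks above drop the dummy_ubuf / empty_node sentinels: an unregistered buffer slot is now represented by a plain NULL pointer rather than a shared node whose ubuf range is deliberately invalid, so io_buffer_unmap() only has to test node->buf. A minimal sketch of the two conventions, using hypothetical stand-in types (ubuf / node below are illustrative, not the real io_uring structures):

#include <limits.h>
#include <stddef.h>

/* Hypothetical stand-ins for io_mapped_ubuf / io_rsrc_node, for illustration only. */
struct ubuf {
	unsigned long addr;
	unsigned int len;
};

struct node {
	struct ubuf *buf;
};

/* Old scheme: a shared sentinel with an invalid range marks "nothing registered". */
static const struct ubuf dummy = { .addr = -1UL, .len = UINT_MAX };

static void unmap_old(struct node *n)
{
	if (n->buf != &dummy) {
		/* ... drop the reference and unpin the pages ... */
	}
}

/* New scheme: an empty slot simply has no buffer attached at all. */
static void unmap_new(struct node *n)
{
	if (n->buf) {
		/* ... drop the reference and unpin the pages ... */
	}
}

With the NULL convention there is no special sentinel object to export and compare against in every consumer.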
@@ -265,20 +254,21 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 		err = io_buffer_validate(iov);
 		if (err)
 			break;
-		if (!iov->iov_base && tag) {
-			err = -EINVAL;
-			break;
-		}
 		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
 		if (IS_ERR(node)) {
 			err = PTR_ERR(node);
 			break;
 		}
+		if (tag) {
+			if (!node) {
+				err = -EINVAL;
+				break;
+			}
+			node->tag = tag;
+		}
 		i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
 		io_reset_rsrc_node(&ctx->buf_table, i);
 		ctx->buf_table.nodes[i] = node;
-		if (tag)
-			node->tag = tag;
 		if (ctx->compat)
 			user_data += sizeof(struct compat_iovec);
 		else
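In the update path above, the old code rejected a tag paired with an empty iovec before registering the buffer; since io_sqe_buffer_register() now returns NULL for a zero iov_base (see the hunk at line 735 below), the check moves after registration and is expressed against the returned node instead. A rough sketch of the new ordering, with hypothetical names (register_slot, update_one) standing in for the real functions:

#include <stddef.h>

/* Hedged sketch of the new control flow; register_slot() stands in for
 * io_sqe_buffer_register(), which now returns NULL for a zero iov_base
 * instead of a shared empty node. All names here are hypothetical. */
struct slot {
	unsigned long long tag;
};

static struct slot *register_slot(void *iov_base, size_t len)
{
	static struct slot backing;	/* stand-in for a freshly allocated node */

	if (!iov_base)
		return NULL;		/* empty iovec: no node at all */
	(void)len;
	return &backing;
}

static int update_one(void *iov_base, size_t len, unsigned long long tag,
		      struct slot **table, unsigned int idx)
{
	struct slot *s = register_slot(iov_base, len);

	if (tag) {
		if (!s)			/* tagging an empty slot is rejected */
			return -22;	/* -EINVAL */
		s->tag = tag;
	}
	table[idx] = s;			/* NULL is now a legal table entry */
	return 0;
}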
@@ -591,8 +581,11 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
 	/* check previously registered pages */
 	for (i = 0; i < ctx->buf_table.nr; i++) {
 		struct io_rsrc_node *node = ctx->buf_table.nodes[i];
-		struct io_mapped_ubuf *imu = node->buf;
+		struct io_mapped_ubuf *imu;
 
+		if (!node)
+			continue;
+		imu = node->buf;
 		for (j = 0; j < imu->nr_bvecs; j++) {
 			if (!PageCompound(imu->bvec[j].bv_page))
 				continue;
@@ -742,7 +735,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
 	bool coalesced;
 
 	if (!iov->iov_base)
-		return rsrc_empty_node;
+		return NULL;
 
 	node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
 	if (!node)
@@ -850,19 +843,20 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
 				ret = -EFAULT;
 				break;
 			}
-			if (tag && !iov->iov_base) {
-				ret = -EINVAL;
-				break;
-			}
 		}
 
 		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
 		if (IS_ERR(node)) {
 			ret = PTR_ERR(node);
 			break;
 		}
-		if (tag)
+		if (tag) {
+			if (!node) {
+				ret = -EINVAL;
+				break;
+			}
 			node->tag = tag;
+		}
 		data.nodes[i] = node;
 	}
 
@@ -957,8 +951,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
 		struct io_rsrc_node *dst_node, *src_node;
 
 		src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
-		if (src_node == rsrc_empty_node) {
-			dst_node = rsrc_empty_node;
+		if (!src_node) {
+			dst_node = NULL;
 		} else {
 			dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
 			if (!dst_node) {
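The later hunks (headpage_already_acct, io_sqe_buffer_register, io_sqe_buffers_register, io_clone_buffers) follow from the same convention: code that walks the buffer table now has to skip NULL entries instead of assuming every slot points at the dummy buffer, and cloning a table simply copies NULL for unset slots. A small sketch of that pattern, again with simplified stand-in types rather than the real io_uring ones:

#include <stddef.h>

/* Simplified stand-ins for illustration only; not the real io_uring structures. */
struct entry {
	int refs;
};

struct table {
	struct entry **nodes;
	unsigned int nr;
};

/* Walking a sparse table: unset slots are NULL and must be skipped. */
static unsigned int count_registered(const struct table *t)
{
	unsigned int i, n = 0;

	for (i = 0; i < t->nr; i++) {
		if (!t->nodes[i])
			continue;	/* empty slot, nothing to inspect */
		n++;
	}
	return n;
}

/* Cloning one slot: a NULL source entry stays NULL in the destination. */
static void clone_slot(struct entry **dst, struct entry *src)
{
	if (!src) {
		*dst = NULL;	/* propagate the empty slot as-is */
		return;
	}
	src->refs++;		/* a real clone would allocate a new node and take a reference */
	*dst = src;
}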