@@ -202,13 +202,6 @@ struct io_rings {
 	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
 };
 
-enum io_uring_cmd_flags {
-	IO_URING_F_COMPLETE_DEFER	= 1,
-	IO_URING_F_UNLOCKED		= 2,
-	/* int's last bit, sign checks are usually faster than a bit test */
-	IO_URING_F_NONBLOCK		= INT_MIN,
-};
-
 struct io_mapped_ubuf {
 	u64		ubuf;
 	u64		ubuf_end;
@@ -972,6 +965,7 @@ struct io_kiocb {
 		struct io_xattr		xattr;
 		struct io_socket	sock;
 		struct io_nop		nop;
+		struct io_uring_cmd	uring_cmd;
 	};
 
 	u8				opcode;
@@ -1050,6 +1044,14 @@ struct io_cancel_data {
 	int seq;
 };
 
+/*
+ * The URING_CMD payload starts at 'cmd' in the first sqe, and continues into
+ * the following sqe if SQE128 is used.
+ */
+#define uring_cmd_pdu_size(is_sqe128)				\
+	((1 + !!(is_sqe128)) * sizeof(struct io_uring_sqe) -	\
+		offsetof(struct io_uring_sqe, cmd))
+
 struct io_op_def {
 	/* needs req->file assigned */
 	unsigned		needs_file : 1;
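For orientation, here is what the new uring_cmd_pdu_size() macro works out to, assuming the standard 64-byte struct io_uring_sqe with the cmd[] payload area starting at byte offset 48 (these numbers are inferred from the uapi layout, not stated in the patch itself):

	/* hypothetical worked example, not part of the patch */
	uring_cmd_pdu_size(0);	/* (1 * 64) - 48 == 16 bytes of inline payload in a normal SQE */
	uring_cmd_pdu_size(1);	/* (2 * 64) - 48 == 80 bytes when IORING_SETUP_SQE128 is used */

The io_op_defs[] entry in the next hunk sets async_size to uring_cmd_pdu_size(1), so the async copy can always hold the largest possible payload.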
@@ -1289,6 +1291,12 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_SOCKET] = {
 		.audit_skip		= 1,
 	},
+	[IORING_OP_URING_CMD] = {
+		.needs_file		= 1,
+		.plug			= 1,
+		.needs_async_setup	= 1,
+		.async_size		= uring_cmd_pdu_size(1),
+	},
 };
 
 /* requests with any of those set should undergo io_disarm_next() */
@@ -1428,6 +1436,8 @@ const char *io_uring_get_opcode(u8 opcode)
 		return "GETXATTR";
 	case IORING_OP_SOCKET:
 		return "SOCKET";
+	case IORING_OP_URING_CMD:
+		return "URING_CMD";
 	case IORING_OP_LAST:
 		return "INVALID";
 	}
@@ -4507,10 +4517,6 @@ static int __io_getxattr_prep(struct io_kiocb *req,
 	const char __user *name;
 	int ret;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (unlikely(sqe->ioprio))
-		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
 
@@ -4620,10 +4626,6 @@ static int __io_setxattr_prep(struct io_kiocb *req,
 	const char __user *name;
 	int ret;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (unlikely(sqe->ioprio))
-		return -EINVAL;
 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
 		return -EBADF;
 
@@ -4910,6 +4912,96 @@ static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
 	return 0;
 }
 
+static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
+{
+	req->uring_cmd.task_work_cb(&req->uring_cmd);
+}
+
+void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+			void (*task_work_cb)(struct io_uring_cmd *))
+{
+	struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd);
+
+	req->uring_cmd.task_work_cb = task_work_cb;
+	req->io_task_work.func = io_uring_cmd_work;
+	io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
+
+/*
+ * Called by consumers of io_uring_cmd, if they originally returned
+ * -EIOCBQUEUED upon receiving the command.
+ */
+void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
+{
+	struct io_kiocb *req = container_of(ioucmd, struct io_kiocb, uring_cmd);
+
+	if (ret < 0)
+		req_set_fail(req);
+	if (req->ctx->flags & IORING_SETUP_CQE32)
+		__io_req_complete32(req, 0, ret, 0, res2, 0);
+	else
+		io_req_complete(req, ret);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_done);
+
+static int io_uring_cmd_prep_async(struct io_kiocb *req)
+{
+	size_t cmd_size;
+
+	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);
+
+	memcpy(req->async_data, req->uring_cmd.cmd, cmd_size);
+	return 0;
+}
+
+static int io_uring_cmd_prep(struct io_kiocb *req,
+			     const struct io_uring_sqe *sqe)
+{
+	struct io_uring_cmd *ioucmd = &req->uring_cmd;
+
+	if (sqe->rw_flags)
+		return -EINVAL;
+	ioucmd->cmd = sqe->cmd;
+	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
+	return 0;
+}
+
+static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_uring_cmd *ioucmd = &req->uring_cmd;
+	struct io_ring_ctx *ctx = req->ctx;
+	struct file *file = req->file;
+	int ret;
+
+	if (!req->file->f_op->uring_cmd)
+		return -EOPNOTSUPP;
+
+	if (ctx->flags & IORING_SETUP_SQE128)
+		issue_flags |= IO_URING_F_SQE128;
+	if (ctx->flags & IORING_SETUP_CQE32)
+		issue_flags |= IO_URING_F_CQE32;
+	if (ctx->flags & IORING_SETUP_IOPOLL)
+		issue_flags |= IO_URING_F_IOPOLL;
+
+	if (req_has_async_data(req))
+		ioucmd->cmd = req->async_data;
+
+	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
+	if (ret == -EAGAIN) {
+		if (!req_has_async_data(req)) {
+			if (io_alloc_async_data(req))
+				return -ENOMEM;
+			io_uring_cmd_prep_async(req);
+		}
+		return -EAGAIN;
+	}
+
+	if (ret != -EIOCBQUEUED)
+		io_uring_cmd_done(ioucmd, ret, 0);
+	return 0;
+}
+
 static int io_shutdown_prep(struct io_kiocb *req,
 			    const struct io_uring_sqe *sqe)
 {
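To show how the two exported helpers are meant to be consumed, here is a minimal sketch of a driver-side ->uring_cmd handler. The my_dev_* names and the my_cmd_pdu layout are hypothetical; only io_uring_cmd_done(), io_uring_cmd_complete_in_task(), IO_URING_F_NONBLOCK, the file_operations->uring_cmd hook and the struct io_uring_cmd fields are taken from the kernel side shown in this diff.

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/io_uring.h>

	/* Hypothetical payload a driver could pack into the SQE's cmd area (<= 16 bytes). */
	struct my_cmd_pdu {
		__u64	user_addr;
		__u32	len;
		__u32	flags;
	};

	/* Runs in task context after io_uring_cmd_complete_in_task() punted to task_work. */
	static void my_dev_cmd_task_cb(struct io_uring_cmd *ioucmd)
	{
		/* res2 only reaches userspace when the ring was set up with IORING_SETUP_CQE32 */
		io_uring_cmd_done(ioucmd, 0, 0);
	}

	static int my_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
	{
		const struct my_cmd_pdu *pdu = (const struct my_cmd_pdu *)ioucmd->cmd;

		/* cannot block here; ask io_uring to retry from a sleepable context */
		if (issue_flags & IO_URING_F_NONBLOCK)
			return -EAGAIN;

		switch (ioucmd->cmd_op) {
		case 0:
			/* complete inline: the return value becomes cqe->res via io_uring_cmd_done() */
			return pdu->len;
		case 1:
			/* hand off to hardware; completion happens later from task context */
			io_uring_cmd_complete_in_task(ioucmd, my_dev_cmd_task_cb);
			return -EIOCBQUEUED;
		default:
			return -ENOTTY;
		}
	}

	static const struct file_operations my_dev_fops = {
		.owner		= THIS_MODULE,
		.uring_cmd	= my_dev_uring_cmd,
	};

Returning -EAGAIN lets the io_uring_cmd() core above allocate async data, copy the PDU out of the SQE, and retry later; returning -EIOCBQUEUED means the driver will call io_uring_cmd_done() itself once the command finishes.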
@@ -6305,9 +6397,7 @@ static int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_socket *sock = &req->sock;
 
-	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
-		return -EINVAL;
-	if (sqe->ioprio || sqe->addr || sqe->rw_flags || sqe->buf_index)
+	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;
 
 	sock->domain = READ_ONCE(sqe->fd);
@@ -7755,6 +7845,8 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return io_getxattr_prep(req, sqe);
 	case IORING_OP_SOCKET:
 		return io_socket_prep(req, sqe);
+	case IORING_OP_URING_CMD:
+		return io_uring_cmd_prep(req, sqe);
 	}
 
 	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
@@ -7787,6 +7879,8 @@ static int io_req_prep_async(struct io_kiocb *req)
 		return io_recvmsg_prep_async(req);
 	case IORING_OP_CONNECT:
 		return io_connect_prep_async(req);
+	case IORING_OP_URING_CMD:
+		return io_uring_cmd_prep_async(req);
 	}
 	printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
 		    req->opcode);
@@ -8081,6 +8175,9 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	case IORING_OP_SOCKET:
 		ret = io_socket(req, issue_flags);
 		break;
+	case IORING_OP_URING_CMD:
+		ret = io_uring_cmd(req, issue_flags);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -12699,6 +12796,8 @@ static int __init io_uring_init(void)
 
 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
 
+	BUILD_BUG_ON(sizeof(struct io_uring_cmd) > 64);
+
 	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
 				SLAB_ACCOUNT);
 	return 0;
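On the userspace side, submitting one of these commands looks roughly like the sketch below, using basic liburing ring setup plus hand-filled SQE fields. The MY_CMD payload struct and the cmd_op value are hypothetical, and accessing sqe->cmd_op / sqe->cmd assumes a uapi io_uring.h new enough to expose the layout this series targets.

	#include <liburing.h>
	#include <string.h>
	#include <stdio.h>

	/* hypothetical payload; must fit the 16-byte inline cmd area of a normal SQE */
	struct my_cmd_pdu {
		__u64	user_addr;
		__u32	len;
		__u32	flags;
	};

	int submit_uring_cmd(int devfd)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct my_cmd_pdu pdu = { 0 };
		int ret;

		/* pass IORING_SETUP_SQE128 here instead if the payload needs up to 80 bytes */
		ret = io_uring_queue_init(8, &ring, 0);
		if (ret < 0)
			return ret;

		sqe = io_uring_get_sqe(&ring);
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = devfd;
		sqe->cmd_op = 1;			/* hypothetical driver-defined sub-opcode */
		memcpy(sqe->cmd, &pdu, sizeof(pdu));	/* inline payload starts at 'cmd' */

		io_uring_submit(&ring);
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret) {
			printf("cqe res %d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return ret;
	}

The driver's return value (or the value it later passes to io_uring_cmd_done()) shows up in cqe->res; the second result word is only delivered on rings created with IORING_SETUP_CQE32.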