@@ -203,6 +203,19 @@ struct vhost_scsi {
 	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+	int head;
+	unsigned int out, in;
+	size_t req_size, rsp_size;
+	size_t out_size, in_size;
+	u8 *target, *lunp;
+	void *req;
+	struct iov_iter out_iter;
+};
+
 static struct workqueue_struct *vhost_scsi_workqueue;
 
 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -1050,10 +1063,107 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	mutex_unlock(&vq->mutex);
 }
 
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+		    struct vhost_scsi_ctx *vc)
+{
+	int ret = -ENXIO;
+
+	vc->head = vhost_get_vq_desc(vq, vq->iov,
+				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+				     NULL, NULL);
+
+	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+		 vc->head, vc->out, vc->in);
+
+	/* On error, stop handling until the next kick. */
+	if (unlikely(vc->head < 0))
+		goto done;
+
+	/* Nothing new? Wait for eventfd to tell us they refilled. */
+	if (vc->head == vq->num) {
+		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+			vhost_disable_notify(&vs->dev, vq);
+			ret = -EAGAIN;
+		}
+		goto done;
+	}
+
+	/*
+	 * Get the size of request and response buffers.
+	 */
+	vc->out_size = iov_length(vq->iov, vc->out);
+	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+	/*
+	 * Copy over the virtio-scsi request header, which for a
+	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+	 * single iovec may contain both the header + outgoing
+	 * WRITE payloads.
+	 *
+	 * copy_from_iter() will advance out_iter, so that it will
+	 * point at the start of the outgoing WRITE payload, if
+	 * DMA_TO_DEVICE is set.
+	 */
+	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
+	ret = 0;
+
+done:
+	return ret;
+}
+
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+	if (unlikely(vc->in_size < vc->rsp_size)) {
+		vq_err(vq,
+		       "Response buf too small, need min %zu bytes got %zu",
+		       vc->rsp_size, vc->in_size);
+		return -EINVAL;
+	} else if (unlikely(vc->out_size < vc->req_size)) {
+		vq_err(vq,
+		       "Request buf too small, need min %zu bytes got %zu",
+		       vc->req_size, vc->out_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+		   struct vhost_scsi_tpg **tpgp)
+{
+	int ret = -EIO;
+
+	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+					  &vc->out_iter)))
+		vq_err(vq, "Faulted on copy_from_iter\n");
+	else if (unlikely(*vc->lunp != 1))
+		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
+		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+	else {
+		struct vhost_scsi_tpg **vs_tpg, *tpg;
+
+		vs_tpg = vq->private_data;	/* validated at handler entry */
+
+		tpg = READ_ONCE(vs_tpg[*vc->target]);
+		if (unlikely(!tpg))
+			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+		else {
+			if (tpgp)
+				*tpgp = tpg;
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
 static void
 vhost_scsi_send_tmf_resp(struct vhost_scsi *vs,
-			 struct vhost_virtqueue *vq,
-			 int head, unsigned int out)
+			 struct vhost_virtqueue *vq,
+			 struct vhost_scsi_ctx *vc)
 {
 	struct virtio_scsi_ctrl_tmf_resp __user *resp;
 	struct virtio_scsi_ctrl_tmf_resp rsp;
@@ -1062,18 +1172,18 @@ vhost_scsi_send_tmf_resp(struct vhost_scsi *vs,
 	pr_debug("%s\n", __func__);
 	memset(&rsp, 0, sizeof(rsp));
 	rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
-	resp = vq->iov[out].iov_base;
+	resp = vq->iov[vc->out].iov_base;
 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
 	if (!ret)
-		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
 	else
 		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
 }
 
 static void
 vhost_scsi_send_an_resp(struct vhost_scsi *vs,
-			struct vhost_virtqueue *vq,
-			int head, unsigned int out)
+			struct vhost_virtqueue *vq,
+			struct vhost_scsi_ctx *vc)
 {
 	struct virtio_scsi_ctrl_an_resp __user *resp;
 	struct virtio_scsi_ctrl_an_resp rsp;
@@ -1082,10 +1192,10 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
 	pr_debug("%s\n", __func__);
 	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
 	rsp.response = VIRTIO_SCSI_S_OK;
-	resp = vq->iov[out].iov_base;
+	resp = vq->iov[vc->out].iov_base;
 	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
 	if (!ret)
-		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
 	else
 		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
 }
@@ -1098,13 +1208,9 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		struct virtio_scsi_ctrl_an_req an;
 		struct virtio_scsi_ctrl_tmf_req tmf;
 	} v_req;
-	struct iov_iter out_iter;
-	unsigned int out = 0, in = 0;
-	int head;
-	size_t req_size, rsp_size, typ_size;
-	size_t out_size, in_size;
-	u8 *lunp;
-	void *req;
+	struct vhost_scsi_ctx vc;
+	size_t typ_size;
+	int ret;
 
 	mutex_lock(&vq->mutex);
 	/*
@@ -1114,52 +1220,28 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	if (!vq->private_data)
 		goto out;
 
+	memset(&vc, 0, sizeof(vc));
+
 	vhost_disable_notify(&vs->dev, vq);
 
 	for (;;) {
-		head = vhost_get_vq_desc(vq, vq->iov,
-				ARRAY_SIZE(vq->iov), &out, &in,
-				NULL, NULL);
-		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-			 head, out, in);
-		/* On error, stop handling until the next kick. */
-		if (unlikely(head < 0))
-			break;
-		/* Nothing new? Wait for eventfd to tell us they refilled. */
-		if (head == vq->num) {
-			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
-				vhost_disable_notify(&vs->dev, vq);
-				continue;
-			}
-			break;
-		}
+		ret = vhost_scsi_get_desc(vs, vq, &vc);
+		if (ret)
+			goto err;
 
 		/*
-		 * Get the size of request and response buffers.
+		 * Get the request type first in order to setup
+		 * other parameters dependent on the type.
 		 */
-		out_size = iov_length(vq->iov, out);
-		in_size = iov_length(&vq->iov[out], in);
-
-		/*
-		 * Copy over the virtio-scsi request header, which for a
-		 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
-		 * single iovec may contain both the header + outgoing
-		 * WRITE payloads.
-		 *
-		 * copy_from_iter() will advance out_iter, so that it will
-		 * point at the start of the outgoing WRITE payload, if
-		 * DMA_TO_DEVICE is set.
-		 */
-		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
-
-		req = &v_req.type;
+		vc.req = &v_req.type;
 		typ_size = sizeof(v_req.type);
 
-		if (unlikely(!copy_from_iter_full(req, typ_size, &out_iter))) {
+		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+						  &vc.out_iter))) {
 			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
 			/*
-			 * The size of the response buffer varies based on
-			 * the request type and must be validated against it.
+			 * The size of the response buffer depends on the
+			 * request type and must be validated against it.
 			 * Since the request type is not known, don't send
 			 * a response.
 			 */
@@ -1168,68 +1250,59 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 		switch (v_req.type) {
 		case VIRTIO_SCSI_T_TMF:
-			req = &v_req.tmf;
-			lunp = &v_req.tmf.lun[0];
-			req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
-			rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+			vc.req = &v_req.tmf;
+			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+			vc.lunp = &v_req.tmf.lun[0];
+			vc.target = &v_req.tmf.lun[1];
 			break;
 		case VIRTIO_SCSI_T_AN_QUERY:
 		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
-			req = &v_req.an;
-			lunp = &v_req.an.lun[0];
-			req_size = sizeof(struct virtio_scsi_ctrl_an_req);
-			rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+			vc.req = &v_req.an;
+			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+			vc.lunp = &v_req.an.lun[0];
+			vc.target = NULL;
 			break;
 		default:
 			vq_err(vq, "Unknown control request %d", v_req.type);
 			continue;
 		}
 
 		/*
-		 * Check for a sane response buffer so we can report early
-		 * errors back to the guest.
+		 * Validate the size of request and response buffers.
+		 * Check for a sane response buffer so we can report
+		 * early errors back to the guest.
 		 */
-		if (unlikely(in_size < rsp_size)) {
-			vq_err(vq,
-			       "Resp buf too small, need min %zu bytes got %zu",
-			       rsp_size, in_size);
-			/*
-			 * Notifications are disabled at this point;
-			 * continue so they can be eventually enabled
-			 * when processing terminates.
-			 */
-			continue;
-		}
+		ret = vhost_scsi_chk_size(vq, &vc);
+		if (ret)
+			goto err;
 
-		if (unlikely(out_size < req_size)) {
-			vq_err(vq,
-			       "Req buf too small, need min %zu bytes got %zu",
-			       req_size, out_size);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
-
-		req += typ_size;
-		req_size -= typ_size;
-
-		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
-			vq_err(vq, "Faulted on copy_from_iter\n");
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
+		/*
+		 * Get the rest of the request now that its size is known.
+		 */
+		vc.req += typ_size;
+		vc.req_size -= typ_size;
 
-		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
-		if (unlikely(*lunp != 1)) {
-			vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
-			vhost_scsi_send_bad_target(vs, vq, head, out);
-			continue;
-		}
+		ret = vhost_scsi_get_req(vq, &vc, NULL);
+		if (ret)
+			goto err;
 
-		if (v_req.type == VIRTIO_SCSI_T_TMF) {
-			pr_debug("%s tmf %d\n", __func__, v_req.tmf.subtype);
-			vhost_scsi_send_tmf_resp(vs, vq, head, out);
-		} else
-			vhost_scsi_send_an_resp(vs, vq, head, out);
+		if (v_req.type == VIRTIO_SCSI_T_TMF)
+			vhost_scsi_send_tmf_resp(vs, vq, &vc);
+		else
+			vhost_scsi_send_an_resp(vs, vq, &vc);
+err:
+		/*
+		 * ENXIO:  No more requests, or read error, wait for next kick
+		 * EINVAL: Invalid response buffer, drop the request
+		 * EIO:    Respond with bad target
+		 * EAGAIN: Pending request
+		 */
+		if (ret == -ENXIO)
+			break;
+		else if (ret == -EIO)
+			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
 	}
 out:
 	mutex_unlock(&vq->mutex);
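
Note (not part of the patch): the hunks above extract vhost_scsi_get_desc(), vhost_scsi_chk_size() and vhost_scsi_get_req() so queue handlers can share the descriptor fetch, buffer-size validation, request copy and error dispatch. The sketch below is only an illustration of how a handler loop composes these helpers; the function name example_handle_loop() and the elided request-specific setup are assumptions, not code from this commit.

/* Illustrative sketch only -- mirrors the control queue loop above. */
static void example_handle_loop(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_ctx vc;
	int ret;

	memset(&vc, 0, sizeof(vc));
	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		/* Fetch the next descriptor and initialize vc.out_iter. */
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * ... point vc.req at the queue's request struct and set
		 * vc.req_size, vc.rsp_size, vc.lunp and vc.target here ...
		 */

		/* Validate buffer sizes, then copy the request and check the lun. */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;
		ret = vhost_scsi_get_req(vq, &vc, NULL);
		if (ret)
			goto err;

		/* ... build and send the response for vc.head here ... */
err:
		if (ret == -ENXIO)
			break;		/* ring empty or read error: wait for next kick */
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	}
}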