@@ -965,67 +965,52 @@ static void srp_disconnect_target(struct srp_target_port *target)
         }
 }
 
-static void srp_free_req_data(struct srp_target_port *target,
-                              struct srp_rdma_ch *ch)
+static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
+        struct srp_target_port *target = host_to_target(shost);
         struct srp_device *dev = target->srp_host->srp_dev;
         struct ib_device *ibdev = dev->dev;
-        struct srp_request *req;
-        int i;
+        struct srp_request *req = scsi_cmd_priv(cmd);
 
-        if (!ch->req_ring)
-                return;
-
-        for (i = 0; i < target->req_ring_size; ++i) {
-                req = &ch->req_ring[i];
-                if (dev->use_fast_reg)
-                        kfree(req->fr_list);
-                if (req->indirect_dma_addr) {
-                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
-                                            target->indirect_size,
-                                            DMA_TO_DEVICE);
-                }
-                kfree(req->indirect_desc);
+        kfree(req->fr_list);
+        if (req->indirect_dma_addr) {
+                ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
+                                    target->indirect_size,
+                                    DMA_TO_DEVICE);
         }
+        kfree(req->indirect_desc);
 
-        kfree(ch->req_ring);
-        ch->req_ring = NULL;
+        return 0;
 }
 
-static int srp_alloc_req_data(struct srp_rdma_ch *ch)
+static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
-        struct srp_target_port *target = ch->target;
+        struct srp_target_port *target = host_to_target(shost);
         struct srp_device *srp_dev = target->srp_host->srp_dev;
         struct ib_device *ibdev = srp_dev->dev;
-        struct srp_request *req;
+        struct srp_request *req = scsi_cmd_priv(cmd);
         dma_addr_t dma_addr;
-        int i, ret = -ENOMEM;
+        int ret = -ENOMEM;
 
-        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
-                               GFP_KERNEL);
-        if (!ch->req_ring)
-                goto out;
-
-        for (i = 0; i < target->req_ring_size; ++i) {
-                req = &ch->req_ring[i];
-                if (srp_dev->use_fast_reg) {
-                        req->fr_list = kmalloc_array(target->mr_per_cmd,
-                                                sizeof(void *), GFP_KERNEL);
-                        if (!req->fr_list)
-                                goto out;
-                }
-                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
-                if (!req->indirect_desc)
-                        goto out;
-
-                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
-                                             target->indirect_size,
-                                             DMA_TO_DEVICE);
-                if (ib_dma_mapping_error(ibdev, dma_addr))
+        if (srp_dev->use_fast_reg) {
+                req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
+                                        GFP_KERNEL);
+                if (!req->fr_list)
                         goto out;
+        }
+        req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
+        if (!req->indirect_desc)
+                goto out;
 
-                req->indirect_dma_addr = dma_addr;
+        dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
+                                     target->indirect_size,
+                                     DMA_TO_DEVICE);
+        if (ib_dma_mapping_error(ibdev, dma_addr)) {
+                srp_exit_cmd_priv(shost, cmd);
+                goto out;
         }
+
+        req->indirect_dma_addr = dma_addr;
         ret = 0;
 
 out:
@@ -1067,10 +1052,6 @@ static void srp_remove_target(struct srp_target_port *target)
         }
         cancel_work_sync(&target->tl_err_work);
         srp_rport_put(target->rport);
-        for (i = 0; i < target->ch_count; i++) {
-                ch = &target->ch[i];
-                srp_free_req_data(target, ch);
-        }
         kfree(target->ch);
         target->ch = NULL;
 
@@ -1289,22 +1270,32 @@ static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
         }
 }
 
-static void srp_terminate_io(struct srp_rport *rport)
+struct srp_terminate_context {
+        struct srp_target_port *srp_target;
+        int scsi_result;
+};
+
+static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
+                              bool reserved)
 {
-        struct srp_target_port *target = rport->lld_data;
-        struct srp_rdma_ch *ch;
-        int i, j;
+        struct srp_terminate_context *context = context_ptr;
+        struct srp_target_port *target = context->srp_target;
+        u32 tag = blk_mq_unique_tag(scmnd->request);
+        struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
+        struct srp_request *req = scsi_cmd_priv(scmnd);
 
-        for (i = 0; i < target->ch_count; i++) {
-                ch = &target->ch[i];
+        srp_finish_req(ch, req, NULL, context->scsi_result);
 
-                for (j = 0; j < target->req_ring_size; ++j) {
-                        struct srp_request *req = &ch->req_ring[j];
+        return true;
+}
 
-                        srp_finish_req(ch, req, NULL,
-                                       DID_TRANSPORT_FAILFAST << 16);
-                }
-        }
+static void srp_terminate_io(struct srp_rport *rport)
+{
+        struct srp_target_port *target = rport->lld_data;
+        struct srp_terminate_context context = { .srp_target = target,
+                .scsi_result = DID_TRANSPORT_FAILFAST << 16 };
+
+        scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
 }
 
 /* Calculate maximum initiator to target information unit length. */
@@ -1360,13 +1351,12 @@ static int srp_rport_reconnect(struct srp_rport *rport)
                 ch = &target->ch[i];
                 ret += srp_new_cm_id(ch);
         }
-        for (i = 0; i < target->ch_count; i++) {
-                ch = &target->ch[i];
-                for (j = 0; j < target->req_ring_size; ++j) {
-                        struct srp_request *req = &ch->req_ring[j];
+        {
+                struct srp_terminate_context context = {
+                        .srp_target = target, .scsi_result = DID_RESET << 16 };
 
-                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
-                }
+                scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
+                                    &context);
         }
         for (i = 0; i < target->ch_count; i++) {
                 ch = &target->ch[i];
@@ -1962,13 +1952,10 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
                 spin_unlock_irqrestore(&ch->lock, flags);
         } else {
                 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
-                if (scmnd && scmnd->host_scribble) {
-                        req = (void *)scmnd->host_scribble;
+                if (scmnd) {
+                        req = scsi_cmd_priv(scmnd);
                         scmnd = srp_claim_req(ch, req, NULL, scmnd);
                 } else {
-                        scmnd = NULL;
-                }
-                if (!scmnd) {
                         shost_printk(KERN_ERR, target->scsi_host,
                                      "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
                                      rsp->tag, ch - target->ch, ch->qp->qp_num);
@@ -2000,7 +1987,6 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
                         srp_free_req(ch, req, scmnd,
                                      be32_to_cpu(rsp->req_lim_delta));
 
-                        scmnd->host_scribble = NULL;
                         scmnd->scsi_done(scmnd);
                 }
         }
@@ -2168,13 +2154,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
         struct srp_target_port *target = host_to_target(shost);
         struct srp_rdma_ch *ch;
-        struct srp_request *req;
+        struct srp_request *req = scsi_cmd_priv(scmnd);
         struct srp_iu *iu;
         struct srp_cmd *cmd;
         struct ib_device *dev;
         unsigned long flags;
         u32 tag;
-        u16 idx;
         int len, ret;
 
         scmnd->result = srp_chkready(target->rport);
@@ -2184,10 +2169,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
         WARN_ON_ONCE(scmnd->request->tag < 0);
         tag = blk_mq_unique_tag(scmnd->request);
         ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
-        idx = blk_mq_unique_tag_to_tag(tag);
-        WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
-                  dev_name(&shost->shost_gendev), tag, idx,
-                  target->req_ring_size);
 
         spin_lock_irqsave(&ch->lock, flags);
         iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
@@ -2196,13 +2177,10 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
         if (!iu)
                 goto err;
 
-        req = &ch->req_ring[idx];
         dev = target->srp_host->srp_dev->dev;
         ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
                                    DMA_TO_DEVICE);
 
-        scmnd->host_scribble = (void *) req;
-
         cmd = iu->buf;
         memset(cmd, 0, sizeof *cmd);
@@ -3083,6 +3061,8 @@ static struct scsi_host_template srp_template = {
         .target_alloc = srp_target_alloc,
         .slave_configure = srp_slave_configure,
         .info = srp_target_info,
+        .init_cmd_priv = srp_init_cmd_priv,
+        .exit_cmd_priv = srp_exit_cmd_priv,
         .queuecommand = srp_queuecommand,
         .change_queue_depth = srp_change_queue_depth,
         .eh_timed_out = srp_timed_out,
@@ -3096,6 +3076,7 @@ static struct scsi_host_template srp_template = {
         .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
         .shost_attrs = srp_host_attrs,
         .track_queue_depth = 1,
+        .cmd_size = sizeof(struct srp_request),
 };
 
 static int srp_sdev_count(struct Scsi_Host *host)
@@ -3675,8 +3656,6 @@ static ssize_t srp_create_target(struct device *dev,
         if (ret)
                 goto out;
 
-        target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
-
         if (!srp_conn_unique(target->srp_host, target)) {
                 if (target->using_rdma_cm) {
                         shost_printk(KERN_INFO, target->scsi_host,
@@ -3779,10 +3758,6 @@ static ssize_t srp_create_target(struct device *dev,
                 if (ret)
                         goto err_disconnect;
 
-                ret = srp_alloc_req_data(ch);
-                if (ret)
-                        goto err_disconnect;
-
                 ret = srp_connect_ch(ch, max_iu_len, multich);
                 if (ret) {
                         char dst[64];
@@ -3801,7 +3776,6 @@ static ssize_t srp_create_target(struct device *dev,
                                 goto free_ch;
                         } else {
                                 srp_free_ch_ib(target, ch);
-                                srp_free_req_data(target, ch);
                                 target->ch_count = ch - target->ch;
                                 goto connected;
                         }
@@ -3862,7 +3836,6 @@ static ssize_t srp_create_target(struct device *dev,
         for (i = 0; i < target->ch_count; i++) {
                 ch = &target->ch[i];
                 srp_free_ch_ib(target, ch);
-                srp_free_req_data(target, ch);
         }
 
         kfree(target->ch);
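
The hunks above replace the driver-managed req_ring with per-command private data owned by the SCSI midlayer: cmd_size tells the midlayer how many extra bytes to allocate behind each struct scsi_cmnd, init_cmd_priv()/exit_cmd_priv() set up and tear down that area, and scsi_cmd_priv() retrieves it from any command. A minimal sketch of that pattern, independent of this driver, follows; every "example_" name and the 512-byte buffer are hypothetical, while scsi_cmd_priv(), cmd_size, init_cmd_priv and exit_cmd_priv are the real midlayer interfaces used by the patch.

/*
 * Sketch of the per-command private data pattern adopted by the patch:
 * the midlayer allocates ->cmd_size extra bytes per command and calls
 * init_cmd_priv()/exit_cmd_priv() once per command at host setup and
 * teardown, so the LLD no longer needs its own request ring.
 */
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_cmd_priv {
        void *indirect_desc;    /* per-command scratch, like srp_request */
};

static int example_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

        priv->indirect_desc = kmalloc(512, GFP_KERNEL);
        return priv->indirect_desc ? 0 : -ENOMEM;
}

static int example_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

        kfree(priv->indirect_desc);
        return 0;
}

static struct scsi_host_template example_template = {
        .name = "example",
        .cmd_size = sizeof(struct example_cmd_priv),
        .init_cmd_priv = example_init_cmd_priv,
        .exit_cmd_priv = example_exit_cmd_priv,
};

The same lookup works from the completion and error paths: calling scsi_cmd_priv() on a command recovered via scsi_host_find_tag() or scsi_host_busy_iter() returns the same private area, which is exactly what srp_process_rsp() and srp_terminate_cmd() rely on in the hunks above.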