@@ -94,6 +94,11 @@ struct mmc_blk_data {
 	unsigned int	read_only;
 	unsigned int	part_type;
 	unsigned int	name_idx;
+	unsigned int	reset_done;
+#define MMC_BLK_READ		BIT(0)
+#define MMC_BLK_WRITE		BIT(1)
+#define MMC_BLK_DISCARD		BIT(2)
+#define MMC_BLK_SECDISCARD	BIT(3)
 
 	/*
 	 * Only set in main mmc_blk_data associated
@@ -109,11 +114,11 @@ static DEFINE_MUTEX(open_lock);
 enum mmc_blk_status {
 	MMC_BLK_SUCCESS = 0,
 	MMC_BLK_PARTIAL,
-	MMC_BLK_RETRY,
-	MMC_BLK_RETRY_SINGLE,
-	MMC_BLK_DATA_ERR,
 	MMC_BLK_CMD_ERR,
+	MMC_BLK_RETRY,
 	MMC_BLK_ABORT,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_ECC_ERR,
 };
 
 module_param(perdev_minors, int, 0444);
@@ -454,7 +459,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				 card->ext_csd.part_time);
 		if (ret)
 			return ret;
-}
+	}
 
 	main_md->part_curr = md->part_type;
 	return 0;
@@ -616,7 +621,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
  * Otherwise we don't understand what happened, so abort.
  */
 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-	struct mmc_blk_request *brq)
+	struct mmc_blk_request *brq, int *ecc_err)
 {
 	bool prev_cmd_status_valid = true;
 	u32 status, stop_status = 0;
@@ -641,6 +646,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	if (err)
 		return ERR_ABORT;
 
+	/* Flag ECC errors */
+	if ((status & R1_CARD_ECC_FAILED) ||
+	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+		*ecc_err = 1;
+
 	/*
 	 * Check the current card state.  If it is in some data transfer
 	 * mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -658,6 +669,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 		 */
 		if (err)
			return ERR_ABORT;
+		if (stop_status & R1_CARD_ECC_FAILED)
+			*ecc_err = 1;
 	}
 
 	/* Check for set block count errors */
@@ -670,6 +683,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
 				prev_cmd_status_valid, status);
 
+	/* Data errors */
+	if (!brq->stop.error)
+		return ERR_CONTINUE;
+
 	/* Now for stop errors.  These aren't fatal to the transfer. */
 	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
 	       req->rq_disk->disk_name, brq->stop.error,
@@ -686,12 +703,45 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	return ERR_CONTINUE;
 }
 
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+			 int type)
+{
+	int err;
+
+	if (md->reset_done & type)
+		return -EEXIST;
+
+	md->reset_done |= type;
+	err = mmc_hw_reset(host);
+	/* Ensure we switch back to the correct partition */
+	if (err != -EOPNOTSUPP) {
+		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+		int part_err;
+
+		main_md->part_curr = main_md->part_type;
+		part_err = mmc_blk_part_switch(host->card, md);
+		if (part_err) {
+			/*
+			 * We have failed to get back into the correct
+			 * partition, so we need to abort the whole request.
+			 */
+			return -ENODEV;
+		}
+	}
+	return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+	md->reset_done &= ~type;
+}
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
-	int err = 0;
+	int err = 0, type = MMC_BLK_DISCARD;
 
 	if (!mmc_can_erase(card)) {
 		err = -EOPNOTSUPP;
@@ -705,7 +755,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 		arg = MMC_TRIM_ARG;
 	else
 		arg = MMC_ERASE_ARG;
-
+retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 INAND_CMD38_ARG_EXT_CSD,
@@ -718,6 +768,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	}
 	err = mmc_erase(card, from, nr, arg);
 out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -731,7 +785,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
-	int err = 0;
+	int err = 0, type = MMC_BLK_SECDISCARD;
 
 	if (!mmc_can_secure_erase_trim(card)) {
 		err = -EOPNOTSUPP;
@@ -745,7 +799,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		arg = MMC_SECURE_TRIM1_ARG;
 	else
 		arg = MMC_SECURE_ERASE_ARG;
-
+retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 INAND_CMD38_ARG_EXT_CSD,
@@ -769,6 +823,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
 	}
 out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -825,11 +883,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 static int mmc_blk_err_check(struct mmc_card *card,
 			     struct mmc_async_req *areq)
 {
-	enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 						    mmc_active);
 	struct mmc_blk_request *brq = &mq_mrq->brq;
 	struct request *req = mq_mrq->req;
+	int ecc_err = 0;
 
 	/*
 	 * sbc.error indicates a problem with the set block count
@@ -841,8 +899,9 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	 * stop.error indicates a problem with the stop command.  Data
 	 * may have been transferred, or may still be transferring.
 	 */
-	if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
-		switch (mmc_blk_cmd_recovery(card, req, brq)) {
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+	    brq->data.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
 		case ERR_RETRY:
 			return MMC_BLK_RETRY;
 		case ERR_ABORT:
@@ -894,23 +953,21 @@ static int mmc_blk_err_check(struct mmc_card *card,
 		       brq->cmd.resp[0], brq->stop.resp[0]);
 
 		if (rq_data_dir(req) == READ) {
-			if (brq->data.blocks > 1) {
-				/* Redo read one sector at a time */
-				pr_warning("%s: retrying using single block read\n",
-					   req->rq_disk->disk_name);
-				return MMC_BLK_RETRY_SINGLE;
-			}
+			if (ecc_err)
+				return MMC_BLK_ECC_ERR;
 			return MMC_BLK_DATA_ERR;
 		} else {
 			return MMC_BLK_CMD_ERR;
 		}
 	}
 
-	if (ret == MMC_BLK_SUCCESS &&
-	    blk_rq_bytes(req) != brq->data.bytes_xfered)
-		ret = MMC_BLK_PARTIAL;
+	if (!brq->data.bytes_xfered)
+		return MMC_BLK_RETRY;
 
-	return ret;
+	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+		return MMC_BLK_PARTIAL;
+
+	return MMC_BLK_SUCCESS;
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1049,12 +1106,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+			   struct mmc_blk_request *brq, struct request *req,
+			   int ret)
+{
+	/*
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still ok written sectors
+	 * as reported by the controller (which might be less than
+	 * the real number of written sectors, but never more).
+	 */
+	if (mmc_card_sd(card)) {
+		u32 blocks;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0, blocks << 9);
+			spin_unlock_irq(&md->lock);
+		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
+	}
+	return ret;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
-	int ret = 1, disable_multi = 0, retry = 0;
+	int ret = 1, disable_multi = 0, retry = 0, type;
 	enum mmc_blk_status status;
 	struct mmc_queue_req *mq_rq;
 	struct request *req;
@@ -1076,6 +1162,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 		brq = &mq_rq->brq;
 		req = mq_rq->req;
+		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
 		mmc_queue_bounce_post(mq_rq);
 
 		switch (status) {
@@ -1084,17 +1171,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			/*
 			 * A block was successfully transferred.
 			 */
+			mmc_blk_reset_success(md, type);
 			spin_lock_irq(&md->lock);
 			ret = __blk_end_request(req, 0,
 						brq->data.bytes_xfered);
 			spin_unlock_irq(&md->lock);
+			/*
+			 * If the blk_end_request function returns non-zero even
+			 * though all data has been transferred and no errors
+			 * were returned by the host controller, it's a bug.
+			 */
 			if (status == MMC_BLK_SUCCESS && ret) {
-				/*
-				 * The blk_end_request has returned non zero
-				 * even though all data is transfered and no
-				 * erros returned by host.
-				 * If this happen it's a bug.
-				 */
 				printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
 				       __func__, blk_rq_bytes(req),
 				       brq->data.bytes_xfered);
@@ -1103,16 +1190,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			}
 			break;
 		case MMC_BLK_CMD_ERR:
-			goto cmd_err;
-		case MMC_BLK_RETRY_SINGLE:
-			disable_multi = 1;
-			break;
+			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
+			goto cmd_abort;
 		case MMC_BLK_RETRY:
 			if (retry++ < 5)
 				break;
+			/* Fall through */
 		case MMC_BLK_ABORT:
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
 			goto cmd_abort;
-		case MMC_BLK_DATA_ERR:
+		case MMC_BLK_DATA_ERR: {
+			int err;
+
+			err = mmc_blk_reset(md, card->host, type);
+			if (!err)
+				break;
+			if (err == -ENODEV)
+				goto cmd_abort;
+			/* Fall through */
+		}
+		case MMC_BLK_ECC_ERR:
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				disable_multi = 1;
+				break;
+			}
 			/*
 			 * After an error, we redo I/O one sector at a
 			 * time, so we only reach here after trying to
@@ -1129,7 +1236,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
 		if (ret) {
 			/*
-			 * In case of a none complete request
+			 * In case of an incomplete request
 			 * prepare it again and resend.
 			 */
 			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
@@ -1139,30 +1246,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
 	return 1;
 
- cmd_err:
-	/*
-	 * If this is an SD card and we're writing, we can first
-	 * mark the known good sectors as ok.
-	 *
-	 * If the card is not SD, we can still ok written sectors
-	 * as reported by the controller (which might be less than
-	 * the real number of written sectors, but never more).
-	 */
-	if (mmc_card_sd(card)) {
-		u32 blocks;
-
-		blocks = mmc_sd_num_wr_blocks(card);
-		if (blocks != (u32)-1) {
-			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, blocks << 9);
-			spin_unlock_irq(&md->lock);
-		}
-	} else {
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
-	}
-
 cmd_abort:
 	spin_lock_irq(&md->lock);
 	while (ret)