[PATCH 4/4] mmc: block: Use MQRQ_XFER_SINGLE_BLOCK for both read and write recovery

From: Shawn Lin

Date: Mon Mar 30 2026 - 00:46:43 EST


Currently, the code uses the MQRQ_XFER_SINGLE_BLOCK flag to handle write
failures by retrying with single-block transfers. However, read failures
bypass this mechanism and instead go through a dedicated legacy path,
mmc_blk_read_single(), which retries the transfer one sector at a time.

Extend the MQRQ_XFER_SINGLE_BLOCK logic to cover multi-block read failures
as well. This allows the redundant and complex mmc_blk_read_single()
function to be removed, unifying the retry logic for both reads and writes
under a single, consistent mechanism that is easier to maintain.

Signed-off-by: Shawn Lin <shawn.lin@xxxxxxxxxxxxxx>
---

drivers/mmc/core/block.c | 70 ++----------------------------------------------
1 file changed, 2 insertions(+), 68 deletions(-)

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 53c1b04..0274e8d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1544,8 +1544,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)

if (err) {
if (mqrq->retries++ < MMC_CQE_RETRIES) {
- if (rq_data_dir(req) == WRITE)
- mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+ mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_IOERR);
@@ -1782,63 +1781,6 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
return err;
}

-#define MMC_READ_SINGLE_RETRIES 2
-
-/* Single (native) sector read during recovery */
-static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
-{
- struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
- struct mmc_request *mrq = &mqrq->brq.mrq;
- struct mmc_card *card = mq->card;
- struct mmc_host *host = card->host;
- blk_status_t error = BLK_STS_OK;
- size_t bytes_per_read = queue_physical_block_size(mq->queue);
-
- do {
- u32 status;
- int err;
- int retries = 0;
-
- while (retries++ <= MMC_READ_SINGLE_RETRIES) {
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
-
- mmc_wait_for_req(host, mrq);
-
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
- if (err)
- goto error_exit;
- }
-
- if (!mrq->cmd->error)
- break;
- }
-
- if (mrq->cmd->error ||
- mrq->data->error ||
- (!mmc_host_is_spi(host) &&
- (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
- error = BLK_STS_IOERR;
- else
- error = BLK_STS_OK;
-
- } while (blk_update_request(req, error, bytes_per_read));
-
- return;
-
-error_exit:
- mrq->data->bytes_xfered = 0;
- blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
- /* Let it try the remaining request again */
- if (mqrq->retries > MMC_MAX_RETRIES - 1)
- mqrq->retries = MMC_MAX_RETRIES - 1;
-}
-
static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
return !!brq->mrq.sbc;
@@ -1974,13 +1916,6 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
return;
}
-
- if (rq_data_dir(req) == READ && brq->data.blocks >
- queue_physical_block_size(mq->queue) >> SECTOR_SHIFT) {
- /* Read one (native) sector at a time */
- mmc_blk_read_single(mq, req);
- return;
- }
}

static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
@@ -2091,8 +2026,7 @@ static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
} else if (!blk_rq_bytes(req)) {
__blk_mq_end_request(req, BLK_STS_IOERR);
} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
- if (rq_data_dir(req) == WRITE)
- mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
+ mqrq->flags |= MQRQ_XFER_SINGLE_BLOCK;
blk_mq_requeue_request(req, true);
} else {
if (mmc_card_removed(mq->card))
--
2.7.4