@@ ... @@
+	/* MCQ mode */
+	if (is_mcq_enabled(hba))
+		return ufshcd_clear_cmds(hba, 1UL << lrbp->task_tag);
@@ ... @@
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
+	struct ufshcd_lrb *lrbp;
+	u32 hwq_num, utag;
+	int tag;
+
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
* In order to prevent other interrupts starvation the DB is read once
@@ -5580,7 +5590,22 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
* Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
* do not want polling to trigger spurious interrupt complaints.
*/
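+	/*
+	 * In single doorbell (SDB) mode one poll pass completes all
+	 * finished requests; in MCQ mode each hardware queue has its own
+	 * completion queue and must be polled individually.
+	 */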
-	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
+	if (!is_mcq_enabled(hba)) {
+		ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
+		goto out;
+	}
+
+	/*
+	 * MCQ mode: there is no single doorbell register, so complete
+	 * commands by polling the hardware queue each one was issued on.
+	 */
+	for (tag = 0; tag < hba->nutrs; tag++) {
+		lrbp = &hba->lrb[tag];
+		if (lrbp->cmd) {
+			/* Recover the hardware queue number from the block layer tag */
+			utag = blk_mq_unique_tag(scsi_cmd_to_rq(lrbp->cmd));
+			hwq_num = blk_mq_unique_tag_to_hwq(utag);
+			ufshcd_poll(hba->host, hwq_num);
+		}
+	}

+out:
	return IRQ_HANDLED;
}
@@ ... @@
+	if (is_mcq_enabled(hba)) {
+		struct ufshcd_lrb *lrbp;
+		int tag;
+
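+		/*
+		 * The legacy doorbell bitmap (hba->outstanding_reqs) is not
+		 * used in MCQ mode; check every lrb slot for a pending
+		 * command instead.
+		 */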
+		for (tag = 0; tag < hba->nutrs; tag++) {
+			lrbp = &hba->lrb[tag];
+			if (!lrbp->cmd)
+				continue;
+
+			ret = ufshcd_try_to_abort_task(hba, tag);
+			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+				ret ? "failed" : "succeeded");
+			if (ret) {
+				needs_reset = true;
+				goto out;
+			}
+		}
+	} else {
+		/* Clear pending transfer requests */
+		for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+			ret = ufshcd_try_to_abort_task(hba, tag);
+			dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+				hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+				ret ? "failed" : "succeeded");
+			if (ret) {
+				needs_reset = true;
+				goto out;
+			}
+		}
+	}