[PATCH 4.14 005/104] scsi: virtio_scsi: fix IO hang caused by automatic irq vector affinity
From: Greg Kroah-Hartman
Date: Tue Aug 14 2018 - 13:35:01 EST
4.14-stable review patch. If anyone has any objections, please let me know.
------------------
From: Ming Lei <ming.lei@xxxxxxxxxx>
commit b5b6e8c8d3b4cbeb447a0f10c7d5de3caa573299 upstream.
Since commit 84676c1f21e8ff5 ("genirq/affinity: assign vectors to all
possible CPUs"), it is possible to end up in a scenario where only
offline CPUs are mapped to an interrupt vector.
This is only an issue for the legacy I/O path, since with blk-mq/scsi-mq
an I/O can't be submitted to a hardware queue if the queue isn't mapped
to an online CPU.
Fix this issue by forcing virtio-scsi to use blk-mq.
[mkp: commit desc]
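For context: on the blk-mq path the virtqueue is picked from the hardware
queue index that blk-mq derived for the request, and blk-mq does not
dispatch on a hardware queue that isn't mapped to an online CPU. A minimal
sketch of that existing selection helper, paraphrased from the upstream
driver for illustration (the function only appears truncated in the hunks
below):

static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(sc->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	/* hwq comes from blk-mq's CPU-to-queue map, so it stays usable */
	return &vscsi->req_vqs[hwq];
}

The legacy path instead chose a virtqueue from smp_processor_id() and a
per-target request counter, which is exactly the code removed below.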
Cc: Omar Sandoval <osandov@xxxxxx>,
Cc: "Martin K. Petersen" <martin.petersen@xxxxxxxxxx>,
Cc: James Bottomley <james.bottomley@xxxxxxxxxxxxxxxxxxxxx>,
Cc: Christoph Hellwig <hch@xxxxxx>,
Cc: Don Brace <don.brace@xxxxxxxxxxxxx>
Cc: Kashyap Desai <kashyap.desai@xxxxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Mike Snitzer <snitzer@xxxxxxxxxx>
Cc: Laurence Oberman <loberman@xxxxxxxxxx>
Fixes: 84676c1f21e8 ("genirq/affinity: assign vectors to all possible CPUs")
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
Acked-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Martin K. Petersen <martin.petersen@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
drivers/scsi/virtio_scsi.c | 59 ++-------------------------------------------
1 file changed, 3 insertions(+), 56 deletions(-)
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
struct virtio_scsi_target_state {
seqcount_t tgt_seq;
- /* Count of outstanding requests. */
- atomic_t reqs;
-
/* Currently active virtqueue for requests sent to this target. */
struct virtio_scsi_vq *req_vq;
};
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct
struct virtio_scsi_cmd *cmd = buf;
struct scsi_cmnd *sc = cmd->sc;
struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
dev_dbg(&sc->device->sdev_gendev,
"cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct
}
sc->scsi_done(sc);
-
- atomic_dec(&tgt->reqs);
}
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(
struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- atomic_inc(&tgt->reqs);
return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}
@@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_p
return &vscsi->req_vqs[hwq];
}
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
- struct virtio_scsi_target_state *tgt)
-{
- struct virtio_scsi_vq *vq;
- unsigned long flags;
- u32 queue_num;
-
- local_irq_save(flags);
- if (atomic_inc_return(&tgt->reqs) > 1) {
- unsigned long seq;
-
- do {
- seq = read_seqcount_begin(&tgt->tgt_seq);
- vq = tgt->req_vq;
- } while (read_seqcount_retry(&tgt->tgt_seq, seq));
- } else {
- /* no writes can be concurrent because of atomic_t */
- write_seqcount_begin(&tgt->tgt_seq);
-
- /* keep previous req_vq if a reader just arrived */
- if (unlikely(atomic_read(&tgt->reqs) > 1)) {
- vq = tgt->req_vq;
- goto unlock;
- }
-
- queue_num = smp_processor_id();
- while (unlikely(queue_num >= vscsi->num_queues))
- queue_num -= vscsi->num_queues;
- tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
- write_seqcount_end(&tgt->tgt_seq);
- }
- local_irq_restore(flags);
-
- return vq;
-}
-
static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sh);
- struct virtio_scsi_target_state *tgt =
- scsi_target(sc->device)->hostdata;
- struct virtio_scsi_vq *req_vq;
-
- if (shost_use_blk_mq(sh))
- req_vq = virtscsi_pick_vq_mq(vscsi, sc);
- else
- req_vq = virtscsi_pick_vq(vscsi, tgt);
+ struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
return virtscsi_queuecommand(vscsi, req_vq, sc);
}
@@ -775,7 +721,6 @@ static int virtscsi_target_alloc(struct
return -ENOMEM;
seqcount_init(&tgt->tgt_seq);
- atomic_set(&tgt->reqs, 0);
tgt->req_vq = &vscsi->req_vqs[0];
starget->hostdata = tgt;
@@ -823,6 +768,7 @@ static struct scsi_host_template virtscs
.target_alloc = virtscsi_target_alloc,
.target_destroy = virtscsi_target_destroy,
.track_queue_depth = 1,
+ .force_blk_mq = 1,
};
static struct scsi_host_template virtscsi_host_template_multi = {
@@ -844,6 +790,7 @@ static struct scsi_host_template virtscs
.target_destroy = virtscsi_target_destroy,
.map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
+ .force_blk_mq = 1,
};
#define virtscsi_config_get(vdev, fld) \
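For reference, the new .force_blk_mq flag is honoured by the SCSI midlayer
when the Scsi_Host is allocated; roughly, per the companion "scsi: core:
introduce force_blk_mq" change (presumably backported alongside this patch;
a sketch for context, not part of this diff):

	/* drivers/scsi/hosts.c, scsi_host_alloc() -- sketch only */
	shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;

so virtio_scsi now uses scsi-mq even when the global scsi_mod.use_blk_mq
parameter is off, which avoids the legacy-path queue selection entirely.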