[PATCH v3 06/11] virtio: blk: Add freeze, restore handlers to support S4
From: Amit Shah
Date: Thu Nov 17 2011 - 06:58:22 EST
Flush any pending requests from the block queue and delete the vq in the
freeze callback to prepare for hibernation.
Re-create the vq in the restore callback to resume normal operation.
Signed-off-by: Amit Shah <amit.shah@xxxxxxxxxx>
---
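For review convenience, here is a commented sketch of the freeze ordering;
the identifiers are taken from the patch below, but the rationale in the
comments is my reading of the code rather than anything stated in the
changelog:

static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure a queued config-change work item is not still
	 * touching the device while we tear things down. */
	flush_work(&vblk->config_work);

	/* Stop the request queue so no new requests are started ... */
	spin_lock_irq(vblk->disk->queue->queue_lock);
	blk_stop_queue(vblk->disk->queue);
	spin_unlock_irq(vblk->disk->queue->queue_lock);
	/* ... and let any pending block-layer work for the queue settle. */
	blk_sync_queue(vblk->disk->queue);

	/* Reset the device so the host stops using the rings, then free
	 * the vq; virtblk_restore() re-creates it via init_vq(). */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
	return 0;
}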
drivers/block/virtio_blk.c | 36 ++++++++++++++++++++++++++++++++++++
1 files changed, 36 insertions(+), 0 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 467f218..f73fb2d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -568,6 +568,38 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
ida_simple_remove(&vd_index_ida, index);
}
+#ifdef CONFIG_PM
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ flush_work(&vblk->config_work);
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ blk_stop_queue(vblk->disk->queue);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ blk_sync_queue(vblk->disk->queue);
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+ int ret;
+
+ ret = init_vq(vdev->priv);
+ if (!ret) {
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ blk_start_queue(vblk->disk->queue);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ }
+ return ret;
+}
+#endif
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -593,6 +625,10 @@ static struct virtio_driver __refdata virtio_blk = {
.probe = virtblk_probe,
.remove = __devexit_p(virtblk_remove),
.config_changed = virtblk_config_changed,
+#ifdef CONFIG_PM
+ .freeze = virtblk_freeze,
+ .restore = virtblk_restore,
+#endif
};
static int __init init(void)
--
1.7.7.1