On Wed, Jul 12, 2017 at 08:40:21PM +0800, Wei Wang wrote:
Add a new vq, cmdq, to handle requests between the device and driver.
This patch implements two commands sent from the device and handled in
the driver.
1) VIRTIO_BALLOON_CMDQ_REPORT_STATS: this command is used to report
the guest memory statistics to the host. The stats_vq mechanism is not
used when the cmdq mechanism is enabled.
2) VIRTIO_BALLOON_CMDQ_REPORT_UNUSED_PAGES: this command is used to
report the guest unused pages to the host.
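
As a rough, illustrative sketch of how the driver's cmdq work item might dispatch these two commands (the handler name, the hdr->cmd field, and the stub cases below are assumptions, not taken from this patch):

static void cmdq_handle_work_func(struct work_struct *work)
{
	struct virtio_balloon *vb =
		container_of(work, struct virtio_balloon, cmdq_handle_work);
	struct virtio_balloon_cmdq_hdr *hdr;
	unsigned int len;

	/* Drain every command header the device has handed back so far. */
	while ((hdr = virtqueue_get_buf(vb->cmd_vq, &len)) != NULL) {
		switch (hdr->cmd) {	/* endianness handling omitted */
		case VIRTIO_BALLOON_CMDQ_REPORT_STATS:
			/* refill and resend the stats buffer */
			break;
		case VIRTIO_BALLOON_CMDQ_REPORT_UNUSED_PAGES:
			/* walk the free lists and report unused pages */
			break;
		default:
			break;
		}
	}
}
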
Since we now have a vq that handles multiple commands, we need to ensure
that only one vq operation is in flight at a time. Here, we change the
existing START_USE() and END_USE() to lock around each vq operation.
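
For context: today START_USE()/END_USE() in virtio_ring.c only assert, when
built with DEBUG, that a vq is not used re-entrantly; otherwise they compile
to nothing. A minimal sketch of turning them into a real lock, assuming a new
per-vq spinlock (the vq_lock name is illustrative):

/* in struct vring_virtqueue */
	spinlock_t vq_lock;	/* serializes all operations on this vq */

/* replace the DEBUG-only assertions with real locking */
#define START_USE(_vq)	spin_lock(&(_vq)->vq_lock)
#define END_USE(_vq)	spin_unlock(&(_vq)->vq_lock)

In practice such a lock would likely need to be irq-safe, since several vq
operations are called from interrupt context.
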
Signed-off-by: Wei Wang <wei.w.wang@xxxxxxxxx>
Signed-off-by: Liang Li <liang.z.li@xxxxxxxxx>
---
drivers/virtio/virtio_balloon.c     | 245 ++++++++++++++++++++++++++++++++++--
drivers/virtio/virtio_ring.c        |  25 +++-
include/linux/virtio.h              |   2 +
include/uapi/linux/virtio_balloon.h |  10 ++
4 files changed, 265 insertions(+), 17 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index aa4e7ec..ae91fbf 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -54,11 +54,12 @@ static struct vfsmount *balloon_mnt;
struct virtio_balloon {
struct virtio_device *vdev;
- struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
+ struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *cmd_vq;
/* The balloon servicing is delegated to a freezable workqueue. */
struct work_struct update_balloon_stats_work;
struct work_struct update_balloon_size_work;
+ struct work_struct cmdq_handle_work;
/* Prevent updating balloon when it is being canceled. */
spinlock_t stop_update_lock;
@@ -90,6 +91,12 @@ struct virtio_balloon {
/* Memory statistics */
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
+ /* Cmdq msg buffer for memory statistics */
+ struct virtio_balloon_cmdq_hdr cmdq_stats_hdr;
+
+ /* Cmdq msg buffer for reporting unused pages */
+ struct virtio_balloon_cmdq_hdr cmdq_unused_page_hdr;
+
/* To register callback in oom notifier call chain */
struct notifier_block nb;
};
@@ -485,25 +492,214 @@ static void update_balloon_size_func(struct work_struct *work)
queue_work(system_freezable_wq, work);
}
+static unsigned int cmdq_hdr_add(struct virtqueue *vq,
+ struct virtio_balloon_cmdq_hdr *hdr,
+ bool in)
+{
+ unsigned int id = VIRTQUEUE_DESC_ID_INIT;
+ uint64_t hdr_pa = (uint64_t)virt_to_phys((void *)hdr);
+
+ virtqueue_add_chain_desc(vq, hdr_pa, sizeof(*hdr), &id, &id, in);
+
+ /* Deliver the hdr for the host to send commands. */
+ if (in) {
+ hdr->flags = 0;
+ virtqueue_add_chain(vq, id, 0, NULL, hdr, NULL);
+ virtqueue_kick(vq);
+ }
+
+ return id;
+}
+
+static void cmdq_add_chain_desc(struct virtio_balloon *vb,
+ struct virtio_balloon_cmdq_hdr *hdr,
+ uint64_t addr,
+ uint32_t len,
+ unsigned int *head_id,
+ unsigned int *prev_id)
+{
+retry:
+ if (*head_id == VIRTQUEUE_DESC_ID_INIT) {
+ *head_id = cmdq_hdr_add(vb->cmd_vq, hdr, 0);
+ *prev_id = *head_id;
+ }
+
+ virtqueue_add_chain_desc(vb->cmd_vq, addr, len, head_id, prev_id, 0);
+ if (*head_id == *prev_id) {

That's an ugly way to detect ring full.
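
One conventional alternative: the existing virtqueue_add_* helpers already
signal a full ring through their return value (-ENOSPC), so callers never
have to compare descriptor ids. A minimal sketch of that pattern, with a
hypothetical cmdq_add_buf() wrapper:

static int cmdq_add_buf(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* -ENOSPC means the ring is full: kick the device and retry later. */
	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_KERNEL);
	if (err == -ENOSPC)
		virtqueue_kick(vq);

	return err;
}
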