[RFC PATCH] vhost-vdpa: mark vhost device invalid to reflect vdpa device unregistration

From: gautam.dawar
Date: Sun Jul 04 2021 - 16:53:14 EST


From: Gautam Dawar <gdawar@xxxxxxxxxx>

As mentioned in Bug 213179, a malicious user-space application can cause
the module that registered a vDPA device to hang forever. This typically
surfaces when vdpa_device_unregister() is called from the module unload
path, causing the rmmod command to never return.

This patch unblocks the calling module by marking the vhost device
unavailable and then continuing with the clean-up. Subsequent requests
from user-space first check the vhost device's availability and are
denied if it has become unavailable.

Signed-off-by: Gautam Dawar <gdawar@xxxxxxxxxx>
---
drivers/vhost/vdpa.c | 45 ++++++++++++++++++++++++++++++++++++++------
1 file changed, 39 insertions(+), 6 deletions(-)
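
Notes (illustrative only, not part of the patch): below is a minimal
user-space sketch of the hang described in Bug 213179, assuming the
device node is /dev/vhost-vdpa-0 (the index may differ on a given
system). Simply keeping the vhost-vdpa character device open is enough
to keep vhost_vdpa_remove() waiting, so rmmod of the module that
registered the vDPA device never returns. With this patch applied, the
wait times out, the device is marked unavailable and clean-up continues.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Open the vhost-vdpa device node and never close it. */
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-vdpa-0");
		return 1;
	}

	/*
	 * While this process sleeps with the fd open, unloading the module
	 * that registered the vDPA device blocks in vdpa_device_unregister().
	 */
	pause();

	close(fd);
	return 0;
}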

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e4b7d26649d8..623bc7f0c0ca 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -47,6 +47,7 @@ struct vhost_vdpa {
int minor;
struct eventfd_ctx *config_ctx;
int in_batch;
+ int dev_invalid;
struct vdpa_iova_range range;
};

@@ -61,6 +62,11 @@ static void handle_vq_kick(struct vhost_work *work)
struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
const struct vdpa_config_ops *ops = v->vdpa->config;

+ if (v->dev_invalid) {
+ dev_info(&v->dev,
+ "%s: vhost_vdpa device unavailable\n", __func__);
+ return;
+ }
ops->kick_vq(v->vdpa, vq - v->vqs);
}

@@ -120,6 +126,11 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;

+ if (v->dev_invalid) {
+ dev_info(&v->dev,
+ "%s: vhost_vdpa device unavailable\n", __func__);
+ return;
+ }
vdpa_reset(vdpa);
v->in_batch = 0;
}
@@ -367,6 +378,11 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
u32 idx;
long r;

+ if (v->dev_invalid) {
+ dev_info(&v->dev,
+ "%s: vhost_vdpa device unavailable\n", __func__);
+ return -ENODEV;
+ }
r = get_user(idx, (u32 __user *)argp);
if (r < 0)
return r;
@@ -450,6 +466,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return 0;
}

+ if (v->dev_invalid) {
+ dev_info(&v->dev,
+ "%s: vhost_vdpa device unavailable\n", __func__);
+ return -ENODEV;
+ }
mutex_lock(&d->mutex);

switch (cmd) {
@@ -745,8 +766,13 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;

- mutex_lock(&dev->mutex);
+ if (v->dev_invalid) {
+ dev_info(&v->dev,
+ "%s: vhost_vdpa device unavailable\n", __func__);
+ return -ENODEV;
+ }

+ mutex_lock(&dev->mutex);
r = vhost_dev_check_owner(dev);
if (r)
goto unlock;
@@ -949,6 +975,11 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
u16 index = vma->vm_pgoff;

+ if (v->dev_invalid) {
+ dev_info(&v->dev,
+ "%s: vhost_vdpa device unavailable\n", __func__);
+ return VM_FAULT_NOPAGE;
+ }
notify = ops->get_vq_notification(vdpa, index);

vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1091,11 +1122,13 @@ static void vhost_vdpa_remove(struct vdpa_device *vdpa)
opened = atomic_cmpxchg(&v->opened, 0, 1);
if (!opened)
break;
- wait_for_completion_timeout(&v->completion,
- msecs_to_jiffies(1000));
- dev_warn_once(&v->dev,
- "%s waiting for /dev/%s to be closed\n",
- __func__, dev_name(&v->dev));
+ if (!wait_for_completion_timeout(&v->completion,
+ msecs_to_jiffies(1000))) {
+ dev_warn(&v->dev,
+ "%s /dev/%s in use, continue..\n",
+ __func__, dev_name(&v->dev));
+ break;
+ }
} while (1);

+ v->dev_invalid = true;
put_device(&v->dev);
--
2.30.1