[PATCH v4 20/22] vfio_pci: Allow to mmap the fault queue

From: Eric Auger
Date: Mon Feb 18 2019 - 08:58:31 EST


The second page of the Producer Fault region contains the fault queue.
There is a benefit in letting userspace mmap this page directly rather
than reading it through the region's read callback. So expose the area
through a sparse mmap capability entry and implement the region's mmap
operation.

Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>
---
drivers/vfio/pci/vfio_pci.c | 61 +++++++++++++++++++++++++++++++++++--
1 file changed, 59 insertions(+), 2 deletions(-)
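
Note, not part of the commit message: below is a minimal userspace
sketch of how the new sparse mmap area could be consumed. It assumes the
producer fault region index (fault_idx) has already been looked up, for
instance by matching the VFIO_REGION_INFO_CAP_PRODUCER_FAULT capability
added earlier in this series, and map_fault_queue() is only an
illustrative helper. It walks the region's capability chain and mmaps
the single exported area, i.e. the fault queue page:

/*
 * Illustrative userspace sketch, not part of the patch: find the sparse
 * mmap capability of the producer fault region and mmap the fault queue
 * page. Assumes fault_idx was already discovered by the caller.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

static void *map_fault_queue(int device_fd, unsigned int fault_idx)
{
	struct vfio_region_info probe = {
		.argsz = sizeof(probe),
		.index = fault_idx,
	};
	struct vfio_region_info *info;
	struct vfio_info_cap_header *hdr;
	void *queue = MAP_FAILED;
	__u32 off;

	/* first call only retrieves argsz, including the capability chain */
	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &probe))
		return MAP_FAILED;

	info = calloc(1, probe.argsz);
	if (!info)
		return MAP_FAILED;
	info->argsz = probe.argsz;
	info->index = fault_idx;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info) ||
	    !(info->flags & VFIO_REGION_INFO_FLAG_CAPS))
		goto out;

	/* walk the capability chain looking for the sparse mmap entry */
	for (off = info->cap_offset; off; off = hdr->next) {
		hdr = (struct vfio_info_cap_header *)((char *)info + off);
		if (hdr->id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) {
			struct vfio_region_info_cap_sparse_mmap *sparse =
				(struct vfio_region_info_cap_sparse_mmap *)hdr;

			/* single area exported: the fault queue (2nd page) */
			queue = mmap(NULL, sparse->areas[0].size, PROT_READ,
				     MAP_SHARED, device_fd,
				     info->offset + sparse->areas[0].offset);
			break;
		}
	}
out:
	free(info);
	return queue;
}

Since the sparse area starts at PAGE_SIZE, the header page of the region
is intentionally left out of the mapping and is still accessed through
the region's read interface.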

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 019c9fd380a5..3aaa14eb5518 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -223,15 +223,70 @@ static const struct vfio_pci_fault_abi fault_abi_versions[] = {

#define NR_FAULT_ABIS ARRAY_SIZE(fault_abi_versions)

+static int vfio_pci_fault_mmap(struct vfio_pci_device *vdev,
+			       struct vfio_pci_region *region,
+			       struct vm_area_struct *vma)
+{
+	u64 phys_len, req_len, pgoff, req_start;
+	unsigned long long addr;
+	unsigned int index, ret;
+
+	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+	phys_len = region->size;
+
+	req_len = vma->vm_end - vma->vm_start;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	/* only the second page of the producer fault region is mmappable */
+	if (req_start < PAGE_SIZE)
+		return -EINVAL;
+
+	if (req_start + req_len > phys_len)
+		return -EINVAL;
+
+	addr = virt_to_phys(vdev->fault_pages);
+	vma->vm_private_data = vdev;
+	vma->vm_pgoff = (addr >> PAGE_SHIFT) + pgoff;
+
+	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+			      req_len, vma->vm_page_prot);
+	return ret;
+}
+
 static int vfio_pci_fault_prod_add_capability(struct vfio_pci_device *vdev,
 	struct vfio_pci_region *region, struct vfio_info_cap *caps)
 {
+	struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
 	struct vfio_region_info_cap_fault cap = {
 		.header.id = VFIO_REGION_INFO_CAP_PRODUCER_FAULT,
 		.header.version = 1,
 		.version = NR_FAULT_ABIS,
 	};
-	return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+	size_t size = sizeof(*sparse) + sizeof(*sparse->areas);
+	int ret;
+
+	ret = vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+	if (ret)
+		return ret;
+
+	sparse = kzalloc(size, GFP_KERNEL);
+	if (!sparse)
+		return -ENOMEM;
+
+	sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+	sparse->header.version = 1;
+	sparse->nr_areas = 1;
+	sparse->areas[0].offset = PAGE_SIZE;
+	sparse->areas[0].size = PAGE_SIZE;
+
+	ret = vfio_info_add_capability(caps, &sparse->header, size);
+	if (ret)
+		kfree(sparse);
+
+	return ret;
 }
 
 static const struct vfio_pci_regops vfio_pci_fault_cons_regops = {
@@ -242,6 +297,7 @@ static const struct vfio_pci_regops vfio_pci_fault_cons_regops = {
 static const struct vfio_pci_regops vfio_pci_fault_prod_regops = {
 	.rw = vfio_pci_fault_prod_rw,
 	.release = vfio_pci_fault_release,
+	.mmap = vfio_pci_fault_mmap,
 	.add_capability = vfio_pci_fault_prod_add_capability,
 };

@@ -300,7 +356,8 @@ static int vfio_pci_init_fault_region(struct vfio_pci_device *vdev)
 		VFIO_REGION_TYPE_NESTED,
 		VFIO_REGION_SUBTYPE_NESTED_FAULT_PROD,
 		&vfio_pci_fault_prod_regops, 2 * PAGE_SIZE,
-		VFIO_REGION_INFO_FLAG_READ, vdev->fault_pages);
+		VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_MMAP,
+		vdev->fault_pages);
 	if (ret)
 		goto out;

--
2.20.1