Re: [PATCH v2 1/2] iommu/virtio: Make use of ops->iotlb_sync_map

From: Jason Gunthorpe
Date: Mon Sep 25 2023 - 08:40:59 EST


On Mon, Sep 25, 2023 at 10:48:21AM +0800, Baolu Lu wrote:
> On 9/23/23 7:33 AM, Jason Gunthorpe wrote:
> > On Fri, Sep 22, 2023 at 07:07:40PM +0100, Robin Murphy wrote:
> >
> > > virtio isn't setting ops->pgsize_bitmap for the sake of direct mappings
> > > either; it sets it once it's discovered any instance, since apparently it's
> > > assuming that all instances must support identical page sizes, and thus once
> > > it's seen one it can work "normally" per the core code's assumptions. It's
> > > also I think the only driver which has a "finalise" bodge but *can* still
> > > properly support map-before-attach, by virtue of having to replay mappings
> > > to every new endpoint anyway.
> > Well, it can't quite do that since it doesn't know the geometry - it's
> > all sort of guessing and hoping it doesn't explode on replay. If it
> > knew the geometry it wouldn't need finalize...
>
> The ultimate solution to this problem seems to be to add a device
> pointer to the parameters of ops->domain_alloc, so that once the domain
> is allocated it is fully initialized. Attaching this domain to an
> incompatible device will return -EINVAL, and the caller then has to
> allocate a new domain for that device.

Sure, it looks like this, and it already works for default domain
setup:

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 8599394c9157d7..1697f5a2370785 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -637,15 +637,10 @@ static void viommu_event_handler(struct virtqueue *vq)
 
 /* IOMMU API */
 
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct viommu_domain *__viommu_domain_alloc(void)
 {
 	struct viommu_domain *vdomain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED &&
-	    type != IOMMU_DOMAIN_DMA &&
-	    type != IOMMU_DOMAIN_IDENTITY)
-		return NULL;
-
 	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
 	if (!vdomain)
 		return NULL;
@@ -654,16 +649,32 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
 	spin_lock_init(&vdomain->mappings_lock);
 	vdomain->mappings = RB_ROOT_CACHED;
 
+	return vdomain;
+}
+
+static struct iommu_domain *viommu_domain_alloc(unsigned type)
+{
+	struct viommu_domain *vdomain;
+
+	/*
+	 * viommu sometimes creates an identity domain out of a paging
+	 * domain, so it can't use the global static identity domain.
+	 * During attach it will fill in an identity page table.
+	 */
+	if (type != IOMMU_DOMAIN_IDENTITY)
+		return NULL;
+	vdomain = __viommu_domain_alloc();
+	if (!vdomain)
+		return NULL;
 	return &vdomain->domain;
 }
 
 static int viommu_domain_finalise(struct viommu_endpoint *vdev,
-				  struct iommu_domain *domain)
+				  struct viommu_domain *vdomain)
 {
 	int ret;
 	unsigned long viommu_page_size;
 	struct viommu_dev *viommu = vdev->viommu;
-	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
 	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
 	if (viommu_page_size > PAGE_SIZE) {
@@ -680,13 +691,13 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
 
 	vdomain->id = (unsigned int)ret;
 
-	domain->pgsize_bitmap = viommu->pgsize_bitmap;
-	domain->geometry = viommu->geometry;
+	vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+	vdomain->domain.geometry = viommu->geometry;
 
 	vdomain->map_flags = viommu->map_flags;
 	vdomain->viommu = viommu;
 
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+	if (vdomain->domain.type == IOMMU_DOMAIN_IDENTITY) {
 		if (virtio_has_feature(viommu->vdev,
 				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
 			vdomain->bypass = true;
@@ -717,6 +728,24 @@ static void viommu_domain_free(struct iommu_domain *domain)
 	kfree(vdomain);
 }
 
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
+{
+	struct viommu_domain *vdomain;
+	vdomain = __viommu_domain_alloc();
+	if (!vdomain)
+		return NULL;
+
+	if (dev) {
+		struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+
+		if (viommu_domain_finalise(vdev, vdomain)) {
+			viommu_domain_free(&vdomain->domain);
+			return NULL;
+		}
+	}
+	return &vdomain->domain;
+}
+
 static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int i;
@@ -732,7 +761,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 * Properly initialize the domain now that we know which viommu
 		 * owns it.
 		 */
-		ret = viommu_domain_finalise(vdev, domain);
+		ret = viommu_domain_finalise(vdev, vdomain);
 	} else if (vdomain->viommu != vdev->viommu) {
 		ret = -EINVAL;
 	}
@@ -1045,6 +1074,7 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 static struct iommu_ops viommu_ops = {
 	.capable		= viommu_capable,
 	.domain_alloc		= viommu_domain_alloc,
+	.domain_alloc_paging	= viommu_domain_alloc_paging,
 	.probe_device		= viommu_probe_device,
 	.probe_finalize		= viommu_probe_finalize,
 	.release_device		= viommu_release_device,