+static void *mtk_iommu_alloc_pgt(struct device *dev, size_t size, gfp_t gfp)
+{
+ dma_addr_t dma;
+ void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+ if (!pages)
+ return NULL;
+
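+ /*
+ * The CPU writes the pagetable and the M4U only reads it, so a
+ * one-way DMA_TO_DEVICE mapping is enough; CPU updates are pushed
+ * out with dma_sync_single_for_device() at map time.
+ */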
+ dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto out_free;
+ /*
+ * We depend on the IOMMU being able to work with any physical
+ * address directly, so if the DMA layer suggests otherwise by
+ * translating or truncating them, that bodes very badly...
+ */
+ if (dma != virt_to_phys(pages))
+ goto out_unmap;
+ kmemleak_ignore(pages);
+ return pages;
+
+out_unmap:
+ dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+ dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+ free_pages_exact(pages, size);
+ return NULL;
+}
+
+static void mtk_iommu_free_pgt(struct device *dev, void *pages, size_t size)
+{
+ dma_unmap_single(dev, (dma_addr_t)virt_to_phys(pages),
+ size, DMA_TO_DEVICE);
+ free_pages_exact(pages, size);
+}
+
+static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+{
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+
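+ /* pgtlock serialises CPU updates of the flat pagetable. */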
+ spin_lock_init(&dom->pgtlock);
+
+ dom->pgt_va = mtk_iommu_alloc_pgt(data->dev,
+ dom->pgt_size, GFP_KERNEL);
+ if (!dom->pgt_va)
+ return -ENOMEM;
+
+ dom->pgt_pa = virt_to_phys(dom->pgt_va);
+
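+ /* Point the M4U hardware at the physical pagetable base. */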
+ writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);
+
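+ /* Keep a back-reference so map/unmap can reach the hardware data. */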
+ dom->cookie = data;
+
+ return 0;
+}
+
+static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+{
+ struct mtk_iommu_domain *dom;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ /*
+ * The MTK m4u supports a 4GB iova address space and only 4KB page
+ * mappings, so the flat pagetable must cover 4GB / 4KB = 1M entries
+ * of 4 bytes each: exactly 4MB.
+ */
+ dom->pgt_size = SZ_4M;
+ return &dom->domain;
+}
+
+static void mtk_iommu_domain_free(struct iommu_domain *domain)
+{
+ kfree(to_mtk_domain(domain));
+}
+
+static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_data *data = dom->cookie;
+ unsigned int page_num = size >> MTK_IOMMU_PAGE_SHIFT;
+ unsigned long flags;
+ unsigned int i;
+ u32 *pgt_base_iova;
+ u32 pabase = (u32)paddr;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ pgt_base_iova = dom->pgt_va + (iova >> MTK_IOMMU_PAGE_SHIFT);
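+ /* One 32-bit descriptor per 4KB page: physical address plus flags. */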
+ for (i = 0; i < page_num; i++) {
+ pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
+ pabase += MTK_IOMMU_PAGE_SIZE;
+ }
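+ /*
+ * Sync only the descriptors just written, so the M4U, which
+ * reads the table from memory, sees the new entries.
+ */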
+ dma_sync_single_for_device(data->dev,
+ dom->pgt_pa + (iova >> MTK_IOMMU_PAGE_SHIFT) * sizeof(u32),
+ page_num * sizeof(u32),
+ DMA_TO_DEVICE);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ mtk_iommu_tlb_flush_range(data, iova, size);
+
+ return 0;
+}
+
+static struct iommu_ops mtk_iommu_ops = {
+ .domain_alloc = mtk_iommu_domain_alloc,
+ .domain_free = mtk_iommu_domain_free,
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .add_device = mtk_iommu_add_device,
+ .remove_device = mtk_iommu_remove_device,
+ .device_group = mtk_iommu_device_group,
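+ /* Only 4KB pages are supported. */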
+ .pgsize_bitmap = MTK_IOMMU_PAGE_SIZE,
+};