[PATCH 12/13] iommu: amd: Introduce iommu_v1_map_page and iommu_v1_unmap_page

From: Suravee Suthikulpanit
Date: Wed Sep 23 2020 - 06:11:58 EST


These implement the io_pgtable_ops map() and unmap() callbacks for the
AMD IOMMU v1 page table, which will be used by the IO page table
framework.

Also clean up unused extern function declarations.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
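Note for reviewers (not part of the commit message): the new callbacks
take only the io_pgtable_ops pointer and recover their context from it
via container_of(). A minimal sketch of the plumbing this relies on,
assuming the helpers introduced earlier in this series (the exact
definitions live in include/linux/io-pgtable.h and
drivers/iommu/amd/amd_iommu_types.h; treat this as illustrative):

	/* Generic framework helper (include/linux/io-pgtable.h). */
	#define io_pgtable_ops_to_pgtable(x) \
		container_of((x), struct io_pgtable, ops)

	/* AMD-specific helpers, per this series: walk from the ops
	 * pointer back out to the enclosing amd_io_pgtable, and from
	 * there to the owning protection_domain.
	 */
	#define io_pgtable_to_data(x) \
		container_of((x), struct amd_io_pgtable, iop)

	#define io_pgtable_ops_to_data(x) \
		io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

	#define io_pgtable_ops_to_domain(x) \
		container_of(io_pgtable_ops_to_data(x), \
			     struct protection_domain, iop)

This is what allows the struct protection_domain * parameter to be
dropped from both functions below.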
 drivers/iommu/amd/amd_iommu.h  | 13 -------------
 drivers/iommu/amd/io_pgtable.c | 25 ++++++++++++-------------
 drivers/iommu/amd/iommu.c      |  7 ++++---
 3 files changed, 16 insertions(+), 29 deletions(-)

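For context, the resulting call path, assuming the ops wiring done by
v1_alloc_pgtable() below (sketch, not part of the patch):

	amd_iommu_map()
	  -> ops->map()                /* ops = &domain->iop.iop.ops */
	    -> iommu_v1_map_page()     /* dom via io_pgtable_ops_to_domain() */

	amd_iommu_unmap()
	  -> ops->unmap()
	    -> iommu_v1_unmap_page()   /* pgtable via io_pgtable_ops_to_data() */
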
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 69996e57fae2..2e8dc2a1ec0f 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -124,19 +124,6 @@ void amd_iommu_apply_ivrs_quirks(void);
 static inline void amd_iommu_apply_ivrs_quirks(void) { }
 #endif
 
-/* TODO: These are temporary and will be removed once fully transition */
-extern int iommu_map_page(struct protection_domain *dom,
-			  unsigned long bus_addr,
-			  unsigned long phys_addr,
-			  unsigned long page_size,
-			  int prot,
-			  gfp_t gfp);
-extern unsigned long iommu_unmap_page(struct protection_domain *dom,
-				      unsigned long bus_addr,
-				      unsigned long page_size);
-extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
-		      unsigned long address,
-		      unsigned long *page_size);
 extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
					  u64 *root, int mode);
 extern void amd_iommu_free_pgtable(struct amd_io_pgtable *pgtable);
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 2f36bab23516..7f7be302c538 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -348,9 +348,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-u64 *fetch_pte(struct amd_io_pgtable *pgtable,
-	       unsigned long address,
-	       unsigned long *page_size)
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+		      unsigned long address,
+		      unsigned long *page_size)
 {
 	int level;
 	u64 *pte;
@@ -423,13 +423,10 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
  * supporting all features of AMD IOMMU page tables like level skipping
  * and full 64 bit address spaces.
  */
-int iommu_map_page(struct protection_domain *dom,
-		   unsigned long iova,
-		   unsigned long paddr,
-		   unsigned long size,
-		   int prot,
-		   gfp_t gfp)
+static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
+			     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
+	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
 	struct page *freelist = NULL;
 	bool updated = false;
 	u64 __pte, *pte;
@@ -492,11 +489,11 @@ int iommu_map_page(struct protection_domain *dom,
 	return ret;
 }
 
-unsigned long iommu_unmap_page(struct protection_domain *dom,
-			       unsigned long iova,
-			       unsigned long size)
+static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
+					 unsigned long iova,
+					 size_t size,
+					 struct iommu_iotlb_gather *gather)
 {
-	struct io_pgtable_ops *ops = &dom->iop.iop.ops;
 	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
 	unsigned long long unmapped;
 	unsigned long unmap_size;
@@ -571,6 +568,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	struct protection_domain *pdom = (struct protection_domain *)cookie;
 
+	pdom->iop.iop.ops.map = iommu_v1_map_page;
+	pdom->iop.iop.ops.unmap = iommu_v1_unmap_page;
 	pdom->iop.iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
 	return &pdom->iop.iop;
 }
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 9a1a16031e00..77f44b927ae7 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2044,6 +2044,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 			 gfp_t gfp)
 {
 	struct protection_domain *domain = to_pdomain(dom);
+	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 	int prot = 0;
 	int ret;
 
@@ -2055,8 +2056,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
-
+	ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
 	domain_flush_np_cache(domain, iova, page_size);
 
 	return ret;
@@ -2067,11 +2067,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 			      struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
+	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
 	if (domain->iop.mode == PAGE_MODE_NONE)
 		return 0;
 
-	return iommu_unmap_page(domain, iova, page_size);
+	return ops->unmap(ops, iova, page_size, gather);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
--
2.17.1