[PATCH 08/13] iommu: amd: Remove amd_iommu_domain_get_pgtable
From: Suravee Suthikulpanit
Date: Wed Sep 23 2020 - 06:15:54 EST
Now that the IO page table root and mode parameters have been moved into
struct amd_io_pgtable, the function amd_iommu_domain_get_pgtable() is no
longer needed. Therefore, remove it along with struct domain_pgtable.
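For reference, the root pointer and paging mode are packed into the single
64-bit pt_root value, which is why a separately decoded copy in struct
domain_pgtable is redundant. The stand-alone sketch below is illustrative
only (it is not kernel code; PAGE_MASK_SKETCH, encode_pt_root() and
decode_pt_root() are made-up names), but it mirrors the "root & PAGE_MASK"
and "root & 7" decoding now done in amd_iommu_domain_set_pt_root():

  /* Illustrative user-space sketch of the pt_root packing. */
  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_MASK_SKETCH (~0xfffULL)    /* assumes 4 KiB pages */

  static uint64_t encode_pt_root(uint64_t *root, int mode)
  {
          /* The root is page aligned, so the low 3 bits can hold the mode. */
          return ((uint64_t)(uintptr_t)root & PAGE_MASK_SKETCH) | (mode & 7);
  }

  static void decode_pt_root(uint64_t pt_root, uint64_t **root, int *mode)
  {
          *root = (uint64_t *)(uintptr_t)(pt_root & PAGE_MASK_SKETCH);
          *mode = pt_root & 7;            /* lowest 3 bits encode pgtable mode */
  }

  int main(void)
  {
          static uint64_t pgtable_root[512] __attribute__((aligned(4096)));
          uint64_t *root;
          int mode;

          decode_pt_root(encode_pt_root(pgtable_root, 3), &root, &mode);
          printf("root=%p mode=%d\n", (void *)root, mode);
          return 0;
  }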
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
drivers/iommu/amd/amd_iommu.h | 4 ++--
drivers/iommu/amd/amd_iommu_types.h | 6 -----
drivers/iommu/amd/io_pgtable.c | 36 ++++++++++-------------------
drivers/iommu/amd/iommu.c | 34 ++++-----------------------
4 files changed, 19 insertions(+), 61 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 8dff7d85be79..2059e64fdc53 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -101,6 +101,8 @@ static inline
void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
{
atomic64_set(&domain->iop.pt_root, root);
+ domain->iop.root = (u64 *)(root & PAGE_MASK);
+ domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
}
static inline
@@ -135,8 +137,6 @@ extern unsigned long iommu_unmap_page(struct protection_domain *dom,
extern u64 *fetch_pte(struct protection_domain *domain,
unsigned long address,
unsigned long *page_size);
-extern void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
- struct domain_pgtable *pgtable);
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
extern void amd_iommu_free_pgtable(struct amd_io_pgtable *pgtable);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5d53b7bec256..a07af389eae1 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -511,12 +511,6 @@ struct protection_domain {
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
};
-/* For decocded pt_root */
-struct domain_pgtable {
- int mode;
- u64 *root;
-};
-
/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 8ce2f0325123..524c5406ccd6 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -215,30 +215,27 @@ static bool increase_address_space(struct protection_domain *domain,
unsigned long address,
gfp_t gfp)
{
- struct domain_pgtable pgtable;
unsigned long flags;
bool ret = true;
u64 *pte;
spin_lock_irqsave(&domain->lock, flags);
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address <= PM_LEVEL_SIZE(pgtable.mode))
+ if (address <= PM_LEVEL_SIZE(domain->iop.mode))
goto out;
ret = false;
- if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
+ if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
goto out;
- *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+ *pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));
- pgtable.root = pte;
- pgtable.mode += 1;
+ domain->iop.root = pte;
+ domain->iop.mode += 1;
amd_iommu_update_and_flush_device_table(domain);
amd_iommu_domain_flush_complete(domain);
@@ -246,7 +243,7 @@ static bool increase_address_space(struct protection_domain *domain,
* Device Table needs to be updated and flushed before the new root can
* be published.
*/
- amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode);
+ amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);
ret = true;
@@ -263,29 +260,23 @@ static u64 *alloc_pte(struct protection_domain *domain,
gfp_t gfp,
bool *updated)
{
- struct domain_pgtable pgtable;
int level, end_lvl;
u64 *pte, *page;
BUG_ON(!is_power_of_2(page_size));
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- while (address > PM_LEVEL_SIZE(pgtable.mode)) {
+ while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
/*
* Return an error if there is no memory to update the
* page-table.
*/
if (!increase_address_space(domain, address, gfp))
return NULL;
-
- /* Read new values to check if update was successful */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
}
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -361,19 +352,16 @@ u64 *fetch_pte(struct protection_domain *domain,
unsigned long address,
unsigned long *page_size)
{
- struct domain_pgtable pgtable;
int level;
u64 *pte;
*page_size = 0;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (address > PM_LEVEL_SIZE(pgtable.mode))
+ if (address > PM_LEVEL_SIZE(domain->iop.mode))
return NULL;
- level = pgtable.mode - 1;
- pte = &pgtable.root[PM_LEVEL_INDEX(level, address)];
+ level = domain->iop.mode - 1;
+ pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cbbea7b952fb..3f6ede1e572c 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -140,15 +140,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
- struct domain_pgtable *pgtable)
-{
- u64 pt_root = atomic64_read(&domain->iop.pt_root);
-
- pgtable->root = (u64 *)(pt_root & PAGE_MASK);
- pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -1464,7 +1455,6 @@ static void clear_dte_entry(u16 devid)
static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
struct amd_iommu *iommu;
bool ats;
@@ -1480,7 +1470,6 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
set_dte_entry(dev_data->devid, domain,
ats, dev_data->iommu_v2);
clone_aliases(dev_data->pdev);
@@ -1806,10 +1795,7 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
void amd_iommu_domain_update(struct protection_domain *domain)
{
- struct domain_pgtable pgtable;
-
/* Update device table */
- amd_iommu_domain_get_pgtable(domain, &pgtable);
amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */
@@ -2058,12 +2044,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
int prot = 0;
int ret;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if (domain->iop.mode == PAGE_MODE_NONE)
return -EINVAL;
if (iommu_prot & IOMMU_READ)
@@ -2083,10 +2067,8 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- struct domain_pgtable pgtable;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if (domain->iop.mode == PAGE_MODE_NONE)
return 0;
return iommu_unmap_page(domain, iova, page_size);
@@ -2097,11 +2079,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
{
struct protection_domain *domain = to_pdomain(dom);
unsigned long offset_mask, pte_pgsize;
- struct domain_pgtable pgtable;
u64 *pte, __pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode == PAGE_MODE_NONE)
+ if (domain->iop.mode == PAGE_MODE_NONE)
return iova;
pte = fetch_pte(domain, iova, &pte_pgsize);
@@ -2470,11 +2450,9 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
static int __set_gcr3(struct protection_domain *domain, int pasid,
unsigned long cr3)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -2488,11 +2466,9 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
static int __clear_gcr3(struct protection_domain *domain, int pasid)
{
- struct domain_pgtable pgtable;
u64 *pte;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- if (pgtable.mode != PAGE_MODE_NONE)
+ if (domain->iop.mode != PAGE_MODE_NONE)
return -EINVAL;
pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
--
2.17.1