[PATCH 1/9] iommu/amd: Introduce helper functions for managing IOMMU memory

From: Suravee Suthikulpanit
Date: Tue Apr 30 2024 - 11:25:26 EST


Depending on the modes of operation, certain AMD IOMMU data structures are
allocated with constraints. For example:

* Some buffers must be 4K-aligned when running on an SNP-enabled host

* To support AMD IOMMU emulation in an SEV guest, some data structures
cannot be encrypted so that the VMM can access the memory successfully.

Introduce struct amd_iommu_mem along with helper functions to allocate and
free memory areas based on the modes of operation. Initially, two modes
are supported:

1. 4K-aligned mode: Required when running on an SNP-enabled host system.
Note that this mode replaces iommu_alloc_4k_pages().

2. SEV-guest shared mode: Set memory as shared when in an SEV guest.
Note that the memory encryption bit of the GPA must be cleared when
programming into the guest base address MMIO register field of
these buffers.

Suggested-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
drivers/iommu/amd/amd_iommu.h | 29 +++++++++++++
drivers/iommu/amd/amd_iommu_types.h | 15 +++++++
drivers/iommu/amd/iommu.c | 66 +++++++++++++++++++++++++++++
3 files changed, 110 insertions(+)

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 2fde1302a584..ccd9003813ac 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -28,6 +28,11 @@ void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
gfp_t gfp, size_t size);

+void *amd_iommu_get_zeroed_mem(gfp_t gfp_mask, struct amd_iommu_mem *mem);
+void *amd_iommu_get_zeroed_mem_node(int nid, gfp_t gfp_mask,
+ struct amd_iommu_mem *mem);
+void amd_iommu_free_mem(struct amd_iommu_mem *mem);
+
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
#else
@@ -137,6 +142,30 @@ static inline u64 iommu_virt_to_phys(void *vaddr)
return (u64)__sme_set(virt_to_phys(vaddr));
}

+static inline bool amd_iommu_mem_is_decrypted(struct amd_iommu_mem *mem)
+{
+ return (mem->modes & ALLOC_MODE_GUEST_MEM_DECRYPT);
+}
+
+static inline bool amd_iommu_mem_is_4k(struct amd_iommu_mem *mem)
+{
+ return (mem->modes & ALLOC_MODE_4K);
+}
+
+static inline u64 amd_iommu_mem_to_phys(struct amd_iommu_mem *mem)
+{
+ /*
+ * Return physical address without the encryption bit for data
+ * structures allocated with the flag ALLOC_MODE_GUEST_MEM_DECRYPT
+ * when running in SEV guest.
+ */
+ if (amd_iommu_mem_is_decrypted(mem) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ return (u64)virt_to_phys(mem->buf);
+
+ return iommu_virt_to_phys(mem->buf);
+}
+
static inline void *iommu_phys_to_virt(unsigned long paddr)
{
return phys_to_virt(__sme_clr(paddr));
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 2b76b5dedc1d..a42e10777922 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -479,6 +479,21 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

+/*
+ * Allocation modes for struct amd_iommu_mem.
+ */
+
+/* Allocate memory as 4K-aligned mode */
+#define ALLOC_MODE_4K BIT(0)
+/* Allocate memory as decrypted mode (for SEV guest) */
+#define ALLOC_MODE_GUEST_MEM_DECRYPT BIT(1)
+
+struct amd_iommu_mem {
+ void *buf;
+ int order;
+ u32 modes;
+};
+
struct irq_remap_table {
raw_spinlock_t lock;
unsigned min_index;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1fdf37e04215..05e967ad7032 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -37,6 +37,7 @@
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>
+#include <asm/set_memory.h>
#include <uapi/linux/iommufd.h>

#include "amd_iommu.h"
@@ -574,6 +575,71 @@ static void amd_iommu_uninit_device(struct device *dev)
*/
}

+void amd_iommu_free_mem(struct amd_iommu_mem *mem)
+{
+ int ret;
+ unsigned long addr = (unsigned long)mem->buf;
+
+ if (amd_iommu_mem_is_decrypted(mem) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ ret = set_memory_encrypted(addr, 1 << mem->order);
+ if (ret) {
+ pr_warn("%s: Fail to set memory encrypted (ret=%d)\n",
+ __func__, ret);
+ return;
+ }
+ }
+
+ iommu_free_pages(mem->buf, mem->order);
+ mem->buf = NULL;
+}
+
+void *amd_iommu_get_zeroed_mem_node(int nid, gfp_t gfp_mask, struct amd_iommu_mem *mem)
+{
+ int ret;
+ unsigned long addr;
+ int numpages = (1 << mem->order);
+
+ mem->buf = iommu_alloc_pages_node(nid, gfp_mask, mem->order);
+ if (!mem->buf)
+ return NULL;
+
+ addr = (unsigned long)mem->buf;
+
+ /* Allocate SEV guest memory as decrypted */
+ if (amd_iommu_mem_is_decrypted(mem) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ ret = set_memory_decrypted(addr, numpages);
+ if (ret) {
+ pr_warn("%s: Fail to set memory decrypted (ret=%d)\n",
+ __func__, ret);
+ goto out_free;
+ }
+ }
+
+ /* Allocate memory as 4K-aligned on SNP-enabled system. */
+ if (amd_iommu_mem_is_4k(mem) &&
+ check_feature(FEATURE_SNP)) {
+ ret = set_memory_4k(addr, numpages);
+ if (ret) {
+ pr_warn("%s: Fail to set memory 4K(ret=%d)\n",
+ __func__, ret);
+ goto out_free;
+ }
+ }
+
+ return mem->buf;
+
+out_free:
+ amd_iommu_free_mem(mem);
+ return NULL;
+}
+
+void *amd_iommu_get_zeroed_mem(gfp_t gfp_mask, struct amd_iommu_mem *mem)
+{
+ return amd_iommu_get_zeroed_mem_node(NUMA_NO_NODE, gfp_mask, mem);
+}
+
/****************************************************************************
*
* Interrupt handling functions
--
2.34.1