Re: [PATCH] x86/virt/tdx: Use precalculated TDVPR page physical address

From: Dave Hansen
Date: Wed Oct 29 2025 - 19:51:19 EST

On 10/20/25 07:42, Sean Christopherson wrote:
...
> If some form of type safety is the goal, why not do something like this?
>
> typedef void __private *tdx_page_t;
>
> Or maybe even define a new address space.
>
> # define __tdx __attribute__((noderef, address_space(__tdx)))

Sean,

I hacked up a TDX physical address namespace for sparse. It's not awful.
It doesn't make the .c files any uglier (or prettier really). It
definitely adds code because it needs a handful of conversion functions.
But those are all one-liner functions.
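
To give a feel for it, callers never see a '__force' cast themselves;
the flow (a sketch against the helpers in the patch below) is just:

	tdx_paddr_t paddr = tdx_alloc_page(GFP_KERNEL);

	if (!paddr)
		return -ENOMEM;

	/* The wrapper does the tdx_paddr_to_phys() conversion internally: */
	err = tdh_mng_addcx(td, paddr);
	...
	tdx_free_page(paddr);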

Net, this approach seems to add a few conversion functions versus the
'struct page' approach. That's because there are at least a couple of
places that *need* a 'struct page', like tdx_unpin().

There's some wonkiness in this, like using virtual addresses to back
the "paddr" type. I did that so we could still do NULL checks instead
of keeping some explicit "invalid paddr" value. It's hidden in the
helpers and not exposed to the users, but it is weird for sure. The
important part isn't what the type is in the end; it's that something
is making it opaque.
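
For contrast, if the type were backed by a real physical address, 0
would be a valid value (physical address 0 exists), so we'd need a
sentinel, hypothetically something like:

	/* Hypothetical alternative, not in the patch: */
	#define TDX_PADDR_INVALID ((__force tdx_paddr_t)-1UL)

	if (paddr == TDX_PADDR_INVALID)
		goto free_hkid;

Backing the type with a kernel virtual address instead keeps the
idiomatic "if (!tdr_page)" checks you'll see in __tdx_td_init() below.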

This can definitely be taken further, like getting rid of the
tdx->vp.tdvpr_pa precalculation. But it's mostly a straight
s/struct page */tdx_paddr_t/ replacement.

I'm not looking at this and jumping up and down about how much better
it makes the code. It certainly *can* find a few things by leveraging
sparse. But, honestly, after seeing that nobody runs or cares about
sparse on this code, it's hard to take it seriously.
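
For the record, this is roughly what it flags (the sparse output here
is an approximation): pass a bare 'struct page *' to one of the
converted tdh_*() wrappers, or cast the opaque type away, and C=1
builds warn along the lines of:

	warning: incorrect type in argument 2 (different address spaces)
	   expected struct tdx_paddr [noderef] __tdx *tdcs_page
	   got struct page *page
	warning: cast removes address space '__tdx' of expression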

Was this generally what you had in mind? Should I turn this into a real
series?

diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 6b338d7f01b7d..644b53bcfdfed 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -37,6 +37,7 @@
#include <uapi/asm/mce.h>
#include <asm/tdx_global_metadata.h>
#include <linux/pgtable.h>
+#include <linux/mm.h>

/*
* Used by the #VE exception handler to gather the #VE exception
@@ -154,15 +155,61 @@ int tdx_guest_keyid_alloc(void);
u32 tdx_get_nr_guest_keyids(void);
void tdx_guest_keyid_free(unsigned int keyid);

-void tdx_quirk_reset_page(struct page *page);
+struct tdx_paddr;
+#if defined(__CHECKER__)
+#define __tdx __attribute__((noderef, address_space(__tdx)))
+#else
+#define __tdx
+#endif
+typedef struct tdx_paddr __tdx * tdx_paddr_t;
+
+static inline tdx_paddr_t tdx_alloc_page(gfp_t gfp_flags)
+{
+ return (__force tdx_paddr_t)kmalloc(PAGE_SIZE, gfp_flags);
+}
+
+static inline void tdx_free_page(tdx_paddr_t paddr)
+{
+ kfree((__force void *)paddr);
+}
+
+static inline phys_addr_t tdx_paddr_to_phys(tdx_paddr_t paddr)
+{
+ // tdx_paddr_t is actually a virtual address to kernel memory:
+ return __pa((__force void *)paddr);
+}
+
+static inline struct page *tdx_paddr_to_page(tdx_paddr_t paddr)
+{
+ // tdx_paddr_t is actually a virtual address to kernel memory:
+ return virt_to_page((__force void *)paddr);
+}
+
+static inline tdx_paddr_t tdx_page_to_paddr(struct page *page)
+{
+ return (__force tdx_paddr_t)page_to_virt(page);
+}
+
+static inline tdx_paddr_t tdx_virt_to_paddr(void *vaddr)
+{
+ return (__force tdx_paddr_t)vaddr;
+}
+
+void tdx_quirk_reset_page(tdx_paddr_t paddr);
+
+static inline void *tdx_paddr_to_virt(tdx_paddr_t paddr)
+{
+ // tdx_paddr_t is actually a virtual address to kernel memory:
+ return (__force void *)paddr;
+}

struct tdx_td {
/* TD root structure: */
- struct page *tdr_page;
+ tdx_paddr_t tdr_page;

int tdcs_nr_pages;
/* TD control structure: */
- struct page **tdcs_pages;
+ tdx_paddr_t *tdcs_pages;

/* Size of `tdcx_pages` in struct tdx_vp */
int tdcx_nr_pages;
@@ -170,19 +217,19 @@ struct tdx_td {

struct tdx_vp {
/* TDVP root page */
- struct page *tdvpr_page;
+ tdx_paddr_t tdvpr_page;
/* precalculated page_to_phys(tdvpr_page) for use in noinstr code */
phys_addr_t tdvpr_pa;

/* TD vCPU control structure: */
- struct page **tdcx_pages;
+ tdx_paddr_t *tdcx_pages;
};

-static inline u64 mk_keyed_paddr(u16 hkid, struct page *page)
+static inline u64 mk_keyed_paddr(u16 hkid, tdx_paddr_t paddr)
{
u64 ret;

- ret = page_to_phys(page);
+ ret = tdx_paddr_to_phys(paddr);
/* KeyID bits are just above the physical address bits: */
ret |= (u64)hkid << boot_cpu_data.x86_phys_bits;

@@ -196,11 +243,11 @@ static inline int pg_level_to_tdx_sept_level(enum pg_level level)
}

u64 tdh_vp_enter(struct tdx_vp *vp, struct tdx_module_args *args);
-u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page);
-u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2);
-u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
-u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page);
-u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mng_addcx(struct tdx_td *td, tdx_paddr_t tdcs_page);
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, tdx_paddr_t paddr, tdx_paddr_t source, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, tdx_paddr_t paddr, u64 *ext_err1, u64 *ext_err2);
+u64 tdh_vp_addcx(struct tdx_vp *vp, tdx_paddr_t tdcx_page);
+u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, tdx_paddr_t paddr, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_mng_key_config(struct tdx_td *td);
u64 tdh_mng_create(struct tdx_td *td, u16 hkid);
@@ -215,12 +262,12 @@ u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err);
u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid);
u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data);
u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask);
-u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
+u64 tdh_phymem_page_reclaim(tdx_paddr_t paddr, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size);
u64 tdh_mem_track(struct tdx_td *tdr);
u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td);
-u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page);
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, tdx_paddr_t paddr);
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 0a49c863c811b..09abc0f6b27cb 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -302,11 +302,11 @@ static void tdx_no_vcpus_enter_stop(struct kvm *kvm)
}

/* TDH.PHYMEM.PAGE.RECLAIM is allowed only when destroying the TD. */
-static int __tdx_reclaim_page(struct page *page)
+static int __tdx_reclaim_page(tdx_paddr_t paddr)
{
u64 err, rcx, rdx, r8;

- err = tdh_phymem_page_reclaim(page, &rcx, &rdx, &r8);
+ err = tdh_phymem_page_reclaim(paddr, &rcx, &rdx, &r8);

/*
* No need to check for TDX_OPERAND_BUSY; all TD pages are freed
@@ -320,13 +320,13 @@ static int __tdx_reclaim_page(struct page *page)
return 0;
}

-static int tdx_reclaim_page(struct page *page)
+static int tdx_reclaim_page(tdx_paddr_t paddr)
{
int r;

- r = __tdx_reclaim_page(page);
+ r = __tdx_reclaim_page(paddr);
if (!r)
- tdx_quirk_reset_page(page);
+ tdx_quirk_reset_page(paddr);
return r;
}

@@ -336,7 +336,7 @@ static int tdx_reclaim_page(struct page *page)
* private KeyID. Assume the cache associated with the TDX private KeyID has
* been flushed.
*/
-static void tdx_reclaim_control_page(struct page *ctrl_page)
+static void tdx_reclaim_control_page(tdx_paddr_t ctrl_page)
{
/*
* Leak the page if the kernel failed to reclaim the page.
@@ -345,7 +345,7 @@ static void tdx_reclaim_control_page(struct page *ctrl_page)
if (tdx_reclaim_page(ctrl_page))
return;

- __free_page(ctrl_page);
+ __free_page(tdx_paddr_to_page(ctrl_page));
}

struct tdx_flush_vp_arg {
@@ -586,7 +586,7 @@ static void tdx_reclaim_td_control_pages(struct kvm *kvm)
}
tdx_quirk_reset_page(kvm_tdx->td.tdr_page);

- __free_page(kvm_tdx->td.tdr_page);
+ tdx_free_page(kvm_tdx->td.tdr_page);
kvm_tdx->td.tdr_page = NULL;
}

@@ -856,7 +856,7 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu)
}
if (tdx->vp.tdvpr_page) {
tdx_reclaim_control_page(tdx->vp.tdvpr_page);
- tdx->vp.tdvpr_page = 0;
+ tdx->vp.tdvpr_page = NULL;
tdx->vp.tdvpr_pa = 0;
}

@@ -1583,13 +1583,13 @@ void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa);
}

-static void tdx_unpin(struct kvm *kvm, struct page *page)
+static void tdx_unpin(struct kvm *kvm, tdx_paddr_t paddr)
{
- put_page(page);
+ put_page(tdx_paddr_to_page(paddr));
}

static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, struct page *page)
+ enum pg_level level, tdx_paddr_t paddr)
{
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1597,15 +1597,15 @@ static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
u64 entry, level_state;
u64 err;

- err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, page, &entry, &level_state);
+ err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, paddr, &entry, &level_state);
if (unlikely(tdx_operand_busy(err))) {
- tdx_unpin(kvm, page);
+ tdx_unpin(kvm, paddr);
return -EBUSY;
}

if (KVM_BUG_ON(err, kvm)) {
pr_tdx_error_2(TDH_MEM_PAGE_AUG, err, entry, level_state);
- tdx_unpin(kvm, page);
+ tdx_unpin(kvm, paddr);
return -EIO;
}

@@ -1661,13 +1661,13 @@ static int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
*/
smp_rmb();
if (likely(kvm_tdx->state == TD_STATE_RUNNABLE))
- return tdx_mem_page_aug(kvm, gfn, level, page);
+ return tdx_mem_page_aug(kvm, gfn, level, tdx_page_to_paddr(page));

return tdx_mem_page_record_premap_cnt(kvm, gfn, level, pfn);
}

static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, struct page *page)
+ enum pg_level level, tdx_paddr_t paddr)
{
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1705,14 +1705,14 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
return -EIO;
}

- err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
+ err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, paddr);

if (KVM_BUG_ON(err, kvm)) {
pr_tdx_error(TDH_PHYMEM_PAGE_WBINVD, err);
return -EIO;
}
- tdx_quirk_reset_page(page);
- tdx_unpin(kvm, page);
+ tdx_quirk_reset_page(paddr);
+ tdx_unpin(kvm, paddr);
return 0;
}

@@ -1721,10 +1721,10 @@ static int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
{
int tdx_level = pg_level_to_tdx_sept_level(level);
gpa_t gpa = gfn_to_gpa(gfn);
- struct page *page = virt_to_page(private_spt);
+ tdx_paddr_t paddr = tdx_virt_to_paddr(private_spt);
u64 err, entry, level_state;

- err = tdh_mem_sept_add(&to_kvm_tdx(kvm)->td, gpa, tdx_level, page, &entry,
+ err = tdh_mem_sept_add(&to_kvm_tdx(kvm)->td, gpa, tdx_level, paddr, &entry,
&level_state);
if (unlikely(tdx_operand_busy(err)))
return -EBUSY;
@@ -1771,7 +1771,7 @@ static int tdx_is_sept_zap_err_due_to_premap(struct kvm_tdx *kvm_tdx, u64 err,
}

static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level, struct page *page)
+ enum pg_level level, tdx_paddr_t paddr)
{
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1792,7 +1792,7 @@ static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
if (tdx_is_sept_zap_err_due_to_premap(kvm_tdx, err, entry, level) &&
!KVM_BUG_ON(!atomic64_read(&kvm_tdx->nr_premapped), kvm)) {
atomic64_dec(&kvm_tdx->nr_premapped);
- tdx_unpin(kvm, page);
+ tdx_unpin(kvm, paddr);
return 0;
}

@@ -1872,13 +1872,13 @@ static int tdx_sept_free_private_spt(struct kvm *kvm, gfn_t gfn,
* The HKID assigned to this TD was already freed and cache was
* already flushed. We don't have to flush again.
*/
- return tdx_reclaim_page(virt_to_page(private_spt));
+ return tdx_reclaim_page(tdx_virt_to_paddr(private_spt));
}

static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
enum pg_level level, kvm_pfn_t pfn)
{
- struct page *page = pfn_to_page(pfn);
+ tdx_paddr_t paddr = tdx_page_to_paddr(pfn_to_page(pfn));
int ret;

/*
@@ -1889,7 +1889,7 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
return -EINVAL;

- ret = tdx_sept_zap_private_spte(kvm, gfn, level, page);
+ ret = tdx_sept_zap_private_spte(kvm, gfn, level, paddr);
if (ret <= 0)
return ret;

@@ -1899,7 +1899,7 @@ static int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
*/
tdx_track(kvm);

- return tdx_sept_drop_private_spte(kvm, gfn, level, page);
+ return tdx_sept_drop_private_spte(kvm, gfn, level, paddr);
}

void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
@@ -2461,8 +2461,8 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
cpumask_var_t packages;
- struct page **tdcs_pages = NULL;
- struct page *tdr_page;
+ tdx_paddr_t *tdcs_pages = NULL;
+ tdx_paddr_t tdr_page;
int ret, i;
u64 err, rcx;

@@ -2480,7 +2480,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,

atomic_inc(&nr_configured_hkid);

- tdr_page = alloc_page(GFP_KERNEL);
+ tdr_page = tdx_alloc_page(GFP_KERNEL);
if (!tdr_page)
goto free_hkid;

@@ -2493,7 +2493,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
goto free_tdr;

for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
- tdcs_pages[i] = alloc_page(GFP_KERNEL);
+ tdcs_pages[i] = tdx_alloc_page(GFP_KERNEL);
if (!tdcs_pages[i])
goto free_tdcs;
}
@@ -2615,7 +2615,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
/* Only free pages not yet added, so start at 'i' */
for (; i < kvm_tdx->td.tdcs_nr_pages; i++) {
if (tdcs_pages[i]) {
- __free_page(tdcs_pages[i]);
+ tdx_free_page(tdcs_pages[i]);
tdcs_pages[i] = NULL;
}
}
@@ -2634,15 +2634,15 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
free_tdcs:
for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
if (tdcs_pages[i])
- __free_page(tdcs_pages[i]);
+ tdx_free_page(tdcs_pages[i]);
}
kfree(tdcs_pages);
kvm_tdx->td.tdcs_pages = NULL;

free_tdr:
if (tdr_page)
- __free_page(tdr_page);
- kvm_tdx->td.tdr_page = 0;
+ tdx_free_page(tdr_page);
+ kvm_tdx->td.tdr_page = NULL;

free_hkid:
tdx_hkid_free(kvm_tdx);
@@ -2939,11 +2939,11 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
{
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
struct vcpu_tdx *tdx = to_tdx(vcpu);
- struct page *page;
+ tdx_paddr_t page;
int ret, i;
u64 err;

- page = alloc_page(GFP_KERNEL);
+ page = tdx_alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
tdx->vp.tdvpr_page = page;
@@ -2953,7 +2953,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
* entry via tdh_vp_enter(). Precalculate and store it instead
* of doing it at runtime later.
*/
- tdx->vp.tdvpr_pa = page_to_phys(tdx->vp.tdvpr_page);
+ tdx->vp.tdvpr_pa = tdx_paddr_to_phys(tdx->vp.tdvpr_page);

tdx->vp.tdcx_pages = kcalloc(kvm_tdx->td.tdcx_nr_pages, sizeof(*tdx->vp.tdcx_pages),
GFP_KERNEL);
@@ -2963,7 +2963,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
}

for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
- page = alloc_page(GFP_KERNEL);
+ page = tdx_alloc_page(GFP_KERNEL);
if (!page) {
ret = -ENOMEM;
goto free_tdcx;
@@ -2987,7 +2987,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
* method, but the rest are freed here.
*/
for (; i < kvm_tdx->td.tdcx_nr_pages; i++) {
- __free_page(tdx->vp.tdcx_pages[i]);
+ tdx_free_page(tdx->vp.tdcx_pages[i]);
tdx->vp.tdcx_pages[i] = NULL;
}
return -EIO;
@@ -3007,7 +3007,7 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)
free_tdcx:
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
if (tdx->vp.tdcx_pages[i])
- __free_page(tdx->vp.tdcx_pages[i]);
+ tdx_free_page(tdx->vp.tdcx_pages[i]);
tdx->vp.tdcx_pages[i] = NULL;
}
kfree(tdx->vp.tdcx_pages);
@@ -3015,8 +3015,8 @@ static int tdx_td_vcpu_init(struct kvm_vcpu *vcpu, u64 vcpu_rcx)

free_tdvpr:
if (tdx->vp.tdvpr_page)
- __free_page(tdx->vp.tdvpr_page);
- tdx->vp.tdvpr_page = 0;
+ tdx_free_page(tdx->vp.tdvpr_page);
+ tdx->vp.tdvpr_page = NULL;
tdx->vp.tdvpr_pa = 0;

return ret;
@@ -3054,7 +3054,8 @@ static int tdx_vcpu_get_cpuid_leaf(struct kvm_vcpu *vcpu, u32 leaf, int *entry_i

static int tdx_vcpu_get_cpuid(struct kvm_vcpu *vcpu, struct kvm_tdx_cmd *cmd)
{
- struct kvm_cpuid2 __user *output, *td_cpuid;
+ struct kvm_cpuid2 __user *output;
+ struct kvm_cpuid2 *td_cpuid;
int r = 0, i = 0, leaf;
u32 level;

@@ -3206,8 +3207,8 @@ static int tdx_gmem_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
}

ret = 0;
- err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
- src_page, &entry, &level_state);
+ err = tdh_mem_page_add(&kvm_tdx->td, gpa, tdx_page_to_paddr(pfn_to_page(pfn)),
+ tdx_page_to_paddr(src_page), &entry, &level_state);
if (err) {
ret = unlikely(tdx_operand_busy(err)) ? -EBUSY : -EIO;
goto out;
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index eac4032484626..b55bbd38e5e1f 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -658,9 +658,9 @@ static void tdx_quirk_reset_paddr(unsigned long base, unsigned long size)
mb();
}

-void tdx_quirk_reset_page(struct page *page)
+void tdx_quirk_reset_page(tdx_paddr_t paddr)
{
- tdx_quirk_reset_paddr(page_to_phys(page), PAGE_SIZE);
+ tdx_quirk_reset_paddr(tdx_paddr_to_phys(paddr), PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(tdx_quirk_reset_page);

@@ -1501,7 +1501,7 @@ EXPORT_SYMBOL_GPL(tdx_guest_keyid_free);

static inline u64 tdx_tdr_pa(struct tdx_td *td)
{
- return page_to_phys(td->tdr_page);
+ return tdx_paddr_to_phys(td->tdr_page);
}

/*
@@ -1510,9 +1510,9 @@ static inline u64 tdx_tdr_pa(struct tdx_td *td)
* Be conservative and make the code simpler by doing the CLFLUSH
* unconditionally.
*/
-static void tdx_clflush_page(struct page *page)
+static void tdx_clflush_page(tdx_paddr_t paddr)
{
- clflush_cache_range(page_to_virt(page), PAGE_SIZE);
+ clflush_cache_range(tdx_paddr_to_virt(paddr), PAGE_SIZE);
}

noinstr u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *args)
@@ -1523,10 +1523,10 @@ noinstr u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *args)
}
EXPORT_SYMBOL_GPL(tdh_vp_enter);

-u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page)
+u64 tdh_mng_addcx(struct tdx_td *td, tdx_paddr_t tdcs_page)
{
struct tdx_module_args args = {
- .rcx = page_to_phys(tdcs_page),
+ .rcx = tdx_paddr_to_phys(tdcs_page),
.rdx = tdx_tdr_pa(td),
};

@@ -1535,13 +1535,13 @@ u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page)
}
EXPORT_SYMBOL_GPL(tdh_mng_addcx);

-u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2)
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, tdx_paddr_t page, tdx_paddr_t source, u64 *ext_err1, u64 *ext_err2)
{
struct tdx_module_args args = {
.rcx = gpa,
.rdx = tdx_tdr_pa(td),
- .r8 = page_to_phys(page),
- .r9 = page_to_phys(source),
+ .r8 = tdx_paddr_to_phys(page),
+ .r9 = tdx_paddr_to_phys(source),
};
u64 ret;

@@ -1555,12 +1555,12 @@ u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page
}
EXPORT_SYMBOL_GPL(tdh_mem_page_add);

-u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2)
+u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, tdx_paddr_t page, u64 *ext_err1, u64 *ext_err2)
{
struct tdx_module_args args = {
.rcx = gpa | level,
.rdx = tdx_tdr_pa(td),
- .r8 = page_to_phys(page),
+ .r8 = tdx_paddr_to_phys(page),
};
u64 ret;

@@ -1574,10 +1574,10 @@ u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u
}
EXPORT_SYMBOL_GPL(tdh_mem_sept_add);

-u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page)
+u64 tdh_vp_addcx(struct tdx_vp *vp, tdx_paddr_t tdcx_page)
{
struct tdx_module_args args = {
- .rcx = page_to_phys(tdcx_page),
+ .rcx = tdx_paddr_to_phys(tdcx_page),
.rdx = vp->tdvpr_pa,
};

@@ -1586,12 +1586,12 @@ u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page)
}
EXPORT_SYMBOL_GPL(tdh_vp_addcx);

-u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2)
+u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, tdx_paddr_t page, u64 *ext_err1, u64 *ext_err2)
{
struct tdx_module_args args = {
.rcx = gpa | level,
.rdx = tdx_tdr_pa(td),
- .r8 = page_to_phys(page),
+ .r8 = tdx_paddr_to_phys(page),
};
u64 ret;

@@ -1794,10 +1794,10 @@ EXPORT_SYMBOL_GPL(tdh_vp_init);
* So despite the names, they must be interpreted specially as described by the spec. Return
* them only for error reporting purposes.
*/
-u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size)
+u64 tdh_phymem_page_reclaim(tdx_paddr_t page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size)
{
struct tdx_module_args args = {
- .rcx = page_to_phys(page),
+ .rcx = tdx_paddr_to_phys(page),
};
u64 ret;

@@ -1858,11 +1858,11 @@ u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td)
}
EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_tdr);

-u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, tdx_paddr_t paddr)
{
struct tdx_module_args args = {};

- args.rcx = mk_keyed_paddr(hkid, page);
+ args.rcx = mk_keyed_paddr(hkid, paddr);

return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}