[PATCH v2 5/6] KVM: x86: Track available/dirty register masks as "unsigned long" values
From: Sean Christopherson
Date: Thu Apr 09 2026 - 18:44:52 EST
Convert regs_{avail,dirty} and all related masks to "unsigned long" values
as an intermediate step towards declaring the fields as actual bitmaps, and
as a step toward supporting APX, which will push the total number of registers
beyond 32 on 64-bit kernels.
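For illustration only (not part of the diff), a minimal sketch of the
failure mode a u32 mask hits once a register index reaches 32, and how
an "unsigned long" plus the generic bitops avoids it on 64-bit kernels
(VCPU_REGS_R16 is a hypothetical APX-era index, not an enum this patch
adds):

	/* Hypothetical: an APX register index at or beyond bit 32. */
	#define VCPU_REGS_R16	32

	u32 avail32 = 0;
	avail32 |= 1u << VCPU_REGS_R16;   /* UB: shift count >= type width */

	unsigned long avail = 0;
	__set_bit(VCPU_REGS_R16, &avail); /* well-defined with 64-bit longs */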
Opportunistically convert TDX's ULL bitmask to a UL to match everything
else (TDX is 64-bit only, so it's a nop in the end).
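As a sanity check (again illustrative, not part of the diff), BIT() and
BIT_ULL() yield identical constants on 64-bit kernels, where "unsigned
long" is 64 bits wide:

	static_assert(BIT(VCPU_REGS_R15) == BIT_ULL(VCPU_REGS_R15),
		      "BIT() and BIT_ULL() agree on 64-bit kernels");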
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 4 ++--
arch/x86/kvm/kvm_cache_regs.h | 2 +-
arch/x86/kvm/svm/svm.h | 2 +-
arch/x86/kvm/vmx/tdx.c | 36 ++++++++++++++++-----------------
arch/x86/kvm/vmx/vmx.h | 20 +++++++++---------
5 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b1eae1e7b04f..c47eb294c066 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -802,8 +802,8 @@ struct kvm_vcpu_arch {
*/
unsigned long regs[NR_VCPU_GENERAL_PURPOSE_REGS];
unsigned long rip;
- u32 regs_avail;
- u32 regs_dirty;
+ unsigned long regs_avail;
+ unsigned long regs_dirty;
unsigned long cr0;
unsigned long cr0_guest_owned_bits;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 7f71d468178c..171e6bc2e169 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -106,7 +106,7 @@ static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu
}
static __always_inline void kvm_clear_available_registers(struct kvm_vcpu *vcpu,
- u32 clear_mask)
+ unsigned long clear_mask)
{
/*
* Note the bitwise-AND! In practice, a straight write would also work
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 677d268ae9c7..7b46a3f13de1 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -474,7 +474,7 @@ static inline bool svm_is_vmrun_failure(u64 exit_code)
* KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
* is changed. svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
*/
-#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_REG_PDPTR)
+#define SVM_REGS_LAZY_LOAD_SET (BIT(VCPU_REG_PDPTR))
static inline void __vmcb_set_intercept(unsigned long *intercepts, u32 bit)
{
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index c9ab7902151f..85f28363e4cc 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1013,23 +1013,23 @@ static fastpath_t tdx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE;
}
-#define TDX_REGS_AVAIL_SET (BIT_ULL(VCPU_REG_EXIT_INFO_1) | \
- BIT_ULL(VCPU_REG_EXIT_INFO_2) | \
- BIT_ULL(VCPU_REGS_RAX) | \
- BIT_ULL(VCPU_REGS_RBX) | \
- BIT_ULL(VCPU_REGS_RCX) | \
- BIT_ULL(VCPU_REGS_RDX) | \
- BIT_ULL(VCPU_REGS_RBP) | \
- BIT_ULL(VCPU_REGS_RSI) | \
- BIT_ULL(VCPU_REGS_RDI) | \
- BIT_ULL(VCPU_REGS_R8) | \
- BIT_ULL(VCPU_REGS_R9) | \
- BIT_ULL(VCPU_REGS_R10) | \
- BIT_ULL(VCPU_REGS_R11) | \
- BIT_ULL(VCPU_REGS_R12) | \
- BIT_ULL(VCPU_REGS_R13) | \
- BIT_ULL(VCPU_REGS_R14) | \
- BIT_ULL(VCPU_REGS_R15))
+#define TDX_REGS_AVAIL_SET (BIT(VCPU_REG_EXIT_INFO_1) | \
+ BIT(VCPU_REG_EXIT_INFO_2) | \
+ BIT(VCPU_REGS_RAX) | \
+ BIT(VCPU_REGS_RBX) | \
+ BIT(VCPU_REGS_RCX) | \
+ BIT(VCPU_REGS_RDX) | \
+ BIT(VCPU_REGS_RBP) | \
+ BIT(VCPU_REGS_RSI) | \
+ BIT(VCPU_REGS_RDI) | \
+ BIT(VCPU_REGS_R8) | \
+ BIT(VCPU_REGS_R9) | \
+ BIT(VCPU_REGS_R10) | \
+ BIT(VCPU_REGS_R11) | \
+ BIT(VCPU_REGS_R12) | \
+ BIT(VCPU_REGS_R13) | \
+ BIT(VCPU_REGS_R14) | \
+ BIT(VCPU_REGS_R15))
static void tdx_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
@@ -1098,7 +1098,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
tdx_load_host_xsave_state(vcpu);
- kvm_clear_available_registers(vcpu, ~(u32)TDX_REGS_AVAIL_SET);
+ kvm_clear_available_registers(vcpu, ~TDX_REGS_AVAIL_SET);
if (unlikely(tdx->vp_enter_ret == EXIT_REASON_EPT_MISCONFIG))
return EXIT_FASTPATH_NONE;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9fb76ea48caf..48447fa983f4 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -620,16 +620,16 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
* cache on demand. Other registers not listed here are synced to
* the cache immediately after VM-Exit.
*/
-#define VMX_REGS_LAZY_LOAD_SET ((1 << VCPU_REG_RIP) | \
- (1 << VCPU_REGS_RSP) | \
- (1 << VCPU_REG_RFLAGS) | \
- (1 << VCPU_REG_PDPTR) | \
- (1 << VCPU_REG_SEGMENTS) | \
- (1 << VCPU_REG_CR0) | \
- (1 << VCPU_REG_CR3) | \
- (1 << VCPU_REG_CR4) | \
- (1 << VCPU_REG_EXIT_INFO_1) | \
- (1 << VCPU_REG_EXIT_INFO_2))
+#define VMX_REGS_LAZY_LOAD_SET (BIT(VCPU_REGS_RSP) | \
+ BIT(VCPU_REG_RIP) | \
+ BIT(VCPU_REG_RFLAGS) | \
+ BIT(VCPU_REG_PDPTR) | \
+ BIT(VCPU_REG_SEGMENTS) | \
+ BIT(VCPU_REG_CR0) | \
+ BIT(VCPU_REG_CR3) | \
+ BIT(VCPU_REG_CR4) | \
+ BIT(VCPU_REG_EXIT_INFO_1) | \
+ BIT(VCPU_REG_EXIT_INFO_2))
static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
--
2.53.0.1213.gd9a14994de-goog