[PATCH 6/7] KVM: x86: Use a proper bitmap for tracking available/dirty registers
From: Sean Christopherson
Date: Tue Mar 10 2026 - 20:34:26 EST
Define regs_{avail,dirty} as bitmaps instead of raw unsigned longs to harden
against overflow, and to allow for dynamically sizing the bitmaps when APX
comes along, which will add 16 more GPRs (R16-R31) and thus increase the total
number of registers beyond 32.
Open code writes in the "reset" APIs, as the writes are hot paths and
bitmap_write() is complete overkill for what KVM needs. Even better,
hardcoding writes to entry '0' in the array is a perfect excuse to assert
that the array contains exactly one entry, e.g. to effectively add a guard
against defining R16-R31 on 32-bit kernels.
For all intents and purposes, no functional change intended even though
using bitmap_fill() will mean "undefined" registers are no longer marked
available and dirty (KVM should never be querying those bits).
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 6 ++++--
arch/x86/kvm/kvm_cache_regs.h | 21 +++++++++++++--------
arch/x86/kvm/x86.c | 4 ++--
3 files changed, 19 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 734c2eee58e0..cff9023f12c7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -211,6 +211,8 @@ enum kvm_reg {
VCPU_REG_SEGMENTS,
VCPU_REG_EXIT_INFO_1,
VCPU_REG_EXIT_INFO_2,
+
+ NR_VCPU_TOTAL_REGS,
};
enum {
@@ -802,8 +804,8 @@ struct kvm_vcpu_arch {
*/
unsigned long regs[NR_VCPU_GENERAL_PURPOSE_REGS];
unsigned long rip;
- unsigned long regs_avail;
- unsigned long regs_dirty;
+ DECLARE_BITMAP(regs_avail, NR_VCPU_TOTAL_REGS);
+ DECLARE_BITMAP(regs_dirty, NR_VCPU_TOTAL_REGS);
unsigned long cr0;
unsigned long cr0_guest_owned_bits;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 5de6c7dfd63b..782710829608 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -67,29 +67,29 @@ static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
kvm_assert_register_caching_allowed(vcpu);
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ return test_bit(reg, vcpu->arch.regs_avail);
}
static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
kvm_assert_register_caching_allowed(vcpu);
- return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+ return test_bit(reg, vcpu->arch.regs_dirty);
}
static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
kvm_assert_register_caching_allowed(vcpu);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(reg, vcpu->arch.regs_avail);
}
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
kvm_assert_register_caching_allowed(vcpu);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+ __set_bit(reg, vcpu->arch.regs_avail);
+ __set_bit(reg, vcpu->arch.regs_dirty);
}
/*
@@ -102,12 +102,15 @@ static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu
enum kvm_reg reg)
{
kvm_assert_register_caching_allowed(vcpu);
- return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ return arch___test_and_set_bit(reg, vcpu->arch.regs_avail);
}
static __always_inline void kvm_reset_available_registers(struct kvm_vcpu *vcpu,
unsigned long available_mask)
{
+ BUILD_BUG_ON(sizeof(available_mask) != sizeof(vcpu->arch.regs_avail[0]));
+ BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.regs_avail) != 1);
+
/*
* Note the bitwise-AND! In practice, a straight write would also work
* as KVM initializes the mask to all ones and never clears registers
@@ -115,13 +118,15 @@ static __always_inline void kvm_reset_available_registers(struct kvm_vcpu *vcpu,
* sanity checking as incorrectly marking an eagerly sync'd register
* unavailable will generate a WARN due to an unexpected cache request.
*/
- vcpu->arch.regs_avail &= available_mask;
+ vcpu->arch.regs_avail[0] &= available_mask;
}
static __always_inline void kvm_reset_dirty_registers(struct kvm_vcpu *vcpu,
unsigned long dirty_mask)
{
- vcpu->arch.regs_dirty = dirty_mask;
+ BUILD_BUG_ON(sizeof(dirty_mask) != sizeof(vcpu->arch.regs_dirty[0]));
+ BUILD_BUG_ON(ARRAY_SIZE(vcpu->arch.regs_dirty) != 1);
+ vcpu->arch.regs_dirty[0] = dirty_mask;
}
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dd39ccbff0d6..c1e1b3030786 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12809,8 +12809,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
int r;
vcpu->arch.last_vmentry_cpu = -1;
- vcpu->arch.regs_avail = ~0;
- vcpu->arch.regs_dirty = ~0;
+ bitmap_fill(vcpu->arch.regs_avail, NR_VCPU_TOTAL_REGS);
+ bitmap_fill(vcpu->arch.regs_dirty, NR_VCPU_TOTAL_REGS);
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
--
2.53.0.473.g4a7958ca14-goog