[PATCH 3/3] KVM: x86: consolidate different ways to test for in-kernel LAPIC

From: Paolo Bonzini
Date: Mon Feb 08 2016 - 11:17:29 EST


There were three ways to test for an in-kernel local APIC: checking
vcpu->arch.apic for (non-)NULL, kvm_vcpu_has_lapic (the more optimized
variant, using a static key), and lapic_in_kernel. Consolidate them into
a single helper that keeps lapic_in_kernel's name and
kvm_vcpu_has_lapic's implementation.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
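Note for reviewers (not part of the commit message): below is the
consolidated helper as it ends up in lapic.h after this patch, with
comments added here to spell out the static-key fast path; the comments
are mine and the snippet is only a reading aid, the authoritative change
is the diff that follows.

extern struct static_key kvm_no_apic_vcpu;

static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
{
	/* kvm_no_apic_vcpu is only enabled when a vCPU is created
	 * without an in-kernel LAPIC (see kvm_arch_vcpu_init), so in
	 * the common case this branch is patched out and the helper
	 * returns true without dereferencing vcpu->arch.apic at all.
	 */
	if (static_key_false(&kvm_no_apic_vcpu))
		return vcpu->arch.apic;
	return true;
}

This is, I assume, also why the last hunk adds
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu): the inline helper now references
the static key from callers that can live in the kvm-intel/kvm-amd
modules, which need the symbol exported from kvm.ko.
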
arch/x86/kvm/irq.h | 8 --------
arch/x86/kvm/lapic.c | 16 ++++++++--------
arch/x86/kvm/lapic.h | 8 ++++----
arch/x86/kvm/pmu.c | 2 +-
arch/x86/kvm/x86.c | 17 +++++++++--------
5 files changed, 22 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ae5c78f2337d..61ebdc13a29a 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -109,14 +109,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
return ret;
}

-static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
-{
- /* Same as irqchip_in_kernel(vcpu->kvm), but with less
- * pointer chasing and no unnecessary memory barriers.
- */
- return vcpu->arch.apic != NULL;
-}
-
void kvm_pic_reset(struct kvm_kpic_state *s);

void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 57e3f27bdadb..1482a581a83c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -281,7 +281,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
struct kvm_cpuid_entry2 *feat;
u32 v = APIC_VERSION;

- if (!kvm_vcpu_has_lapic(vcpu))
+ if (!lapic_in_kernel(vcpu))
return;

feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
@@ -1319,7 +1319,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
u64 guest_tsc, tsc_deadline;

- if (!kvm_vcpu_has_lapic(vcpu))
+ if (!lapic_in_kernel(vcpu))
return;

if (apic->lapic_timer.expired_tscdeadline == 0)
@@ -1645,7 +1645,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;

- if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
+ if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return 0;

@@ -1656,7 +1656,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;

- if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
+ if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
apic_lvtt_period(apic))
return;

@@ -2001,7 +2001,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
struct hrtimer *timer;

- if (!kvm_vcpu_has_lapic(vcpu))
+ if (!lapic_in_kernel(vcpu))
return;

timer = &vcpu->arch.apic->lapic_timer.timer;
@@ -2174,7 +2174,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
struct kvm_lapic *apic = vcpu->arch.apic;

- if (!kvm_vcpu_has_lapic(vcpu))
+ if (!lapic_in_kernel(vcpu))
return 1;

/* if this is ICR write vector before command */
@@ -2188,7 +2188,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
struct kvm_lapic *apic = vcpu->arch.apic;
u32 low, high = 0;

- if (!kvm_vcpu_has_lapic(vcpu))
+ if (!lapic_in_kernel(vcpu))
return 1;

if (apic_reg_read(apic, reg, 4, &low))
@@ -2220,7 +2220,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
u8 sipi_vector;
unsigned long pe;

- if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
+ if (!lapic_in_kernel(vcpu) || !apic->pending_events)
return;

/*
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index afccf4099b00..59610099af04 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -103,7 +103,7 @@ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)

extern struct static_key kvm_no_apic_vcpu;

-static inline bool kvm_vcpu_has_lapic(struct kvm_vcpu *vcpu)
+static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
{
if (static_key_false(&kvm_no_apic_vcpu))
return vcpu->arch.apic;
@@ -130,7 +130,7 @@ static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)

static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_has_lapic(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
+ return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
}

static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
@@ -150,7 +150,7 @@ static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)

static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
+ return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
}

static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
@@ -161,7 +161,7 @@ static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)

static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_has_lapic(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+ return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
}

static inline int kvm_apic_id(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 31aa2c85dc97..06ce377dcbc9 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -257,7 +257,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.apic)
+ if (lapic_in_kernel(vcpu))
kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 63a53e358ba3..cf15bc5b0dff 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3010,7 +3010,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
- kvm_vcpu_has_lapic(vcpu))
+ lapic_in_kernel(vcpu))
vcpu->arch.apic->sipi_vector = events->sipi_vector;

if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
@@ -3023,7 +3023,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
- if (kvm_vcpu_has_lapic(vcpu)) {
+ if (lapic_in_kernel(vcpu)) {
if (events->smi.latched_init)
set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
else
@@ -3263,7 +3263,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
switch (ioctl) {
case KVM_GET_LAPIC: {
r = -EINVAL;
- if (!vcpu->arch.apic)
+ if (!lapic_in_kernel(vcpu))
goto out;
u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

@@ -3281,7 +3281,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
case KVM_SET_LAPIC: {
r = -EINVAL;
- if (!vcpu->arch.apic)
+ if (!lapic_in_kernel(vcpu))
goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic));
if (IS_ERR(u.lapic))
@@ -4116,7 +4116,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,

do {
n = min(len, 8);
- if (!(vcpu->arch.apic &&
+ if (!(lapic_in_kernel(vcpu) &&
!kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
@@ -4136,7 +4136,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)

do {
n = min(len, 8);
- if (!(vcpu->arch.apic &&
+ if (!(lapic_in_kernel(vcpu) &&
!kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
addr, n, v))
&& kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
@@ -6033,7 +6033,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
if (!kvm_x86_ops->update_cr8_intercept)
return;

- if (!vcpu->arch.apic)
+ if (!lapic_in_kernel(vcpu))
return;

if (vcpu->arch.apicv_active)
@@ -7061,7 +7061,7 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- if (!kvm_vcpu_has_lapic(vcpu) &&
+ if (!lapic_in_kernel(vcpu) &&
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
return -EINVAL;

@@ -7616,6 +7616,7 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
}

struct static_key kvm_no_apic_vcpu __read_mostly;
+EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
--
1.8.3.1