[PATCH v2 08/15] svm: Add support for setup/destroy virtual APIC backing page for AVIC

From: Suthikulpanit, Suravee
Date: Thu Aug 15 2019 - 12:25:54 EST


Activating/deactivating AVIC requires setting/unsetting the memory region
used for the virtual APIC backing page (APIC_ACCESS_PAGE_PRIVATE_MEMSLOT).
So, re-factor avic_init_access_page() into avic_setup_access_page() and add
srcu_read_lock()/srcu_read_unlock() around the memslot update, which is
needed to allow the function to be called at run-time.

Also, introduce avic_destroy_access_page() to unset the page when
deactivating AVIC.
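
For reference, the run-time toggling path (added by a later patch in this
series) is expected to use the two helpers roughly as sketched below; the
caller name svm_refresh_avic_access_page() is illustrative only and not
part of this patch:

  /*
   * Illustrative sketch only -- the real caller is introduced later in
   * the series.  It shows the intended run-time use of the helpers:
   * passing init = false makes avic_setup_access_page() drop and
   * re-take the vCPU's SRCU read lock around the memslot update.
   */
  static void svm_refresh_avic_access_page(struct kvm_vcpu *vcpu, bool activate)
  {
  	if (activate)
  		avic_setup_access_page(vcpu, false);
  	else
  		avic_destroy_access_page(vcpu);
  }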

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
arch/x86/kvm/svm.c | 48 +++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 43 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b674cd0..47f2439 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1468,7 +1468,9 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
static void avic_init_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb;
- struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
+ struct kvm *kvm = svm->vcpu.kvm;
+ struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
+
phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
@@ -1477,7 +1479,13 @@ static void avic_init_vmcb(struct vcpu_svm *svm)
vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
- vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+
+ mutex_lock(&kvm->arch.apicv_lock);
+ if (kvm->arch.apicv_state == APICV_ACTIVATED)
+ vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+ else
+ vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
+ mutex_unlock(&kvm->arch.apicv_lock);
}

static void init_vmcb(struct vcpu_svm *svm)
@@ -1660,19 +1668,24 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
* field of the VMCB. Therefore, we set up the
* APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
*/
-static int avic_init_access_page(struct kvm_vcpu *vcpu)
+static int avic_setup_access_page(struct kvm_vcpu *vcpu, bool init)
{
struct kvm *kvm = vcpu->kvm;
int ret = 0;

mutex_lock(&kvm->slots_lock);
+
if (kvm->arch.apic_access_page_done)
goto out;

+ if (!init)
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
ret = __x86_set_memory_region(kvm,
APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE,
PAGE_SIZE);
+ if (!init)
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (ret)
goto out;

@@ -1682,14 +1695,39 @@ static int avic_init_access_page(struct kvm_vcpu *vcpu)
return ret;
}

+static void avic_destroy_access_page(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->slots_lock);
+
+ if (!kvm->arch.apic_access_page_done)
+ goto out;
+
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ __x86_set_memory_region(kvm,
+ APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+ APIC_DEFAULT_PHYS_BASE,
+ 0);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm->arch.apic_access_page_done = false;
+out:
+ mutex_unlock(&kvm->slots_lock);
+}
+
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
- int ret;
+ int ret = 0;
u64 *entry, new_entry;
int id = vcpu->vcpu_id;
+ struct kvm *kvm = vcpu->kvm;
struct vcpu_svm *svm = to_svm(vcpu);

- ret = avic_init_access_page(vcpu);
+ mutex_lock(&kvm->arch.apicv_lock);
+ if (kvm->arch.apicv_state == APICV_ACTIVATED)
+ ret = avic_setup_access_page(vcpu, true);
+ mutex_unlock(&kvm->arch.apicv_lock);
+
if (ret)
return ret;

--
1.8.3.1