[PATCH 2/4] KVM: allocate kvm->vcpus separately
From: Radim Krčmář
Date: Thu Apr 13 2017 - 16:21:39 EST
The maximal number of VCPUs is going to be high, but most VMs are still
going to use just a few. We want to save memory and there are two main
conservative possibilities:
1) turn vcpus into a pointer and allocate separately
2) turn vcpus into variable length array at the end of struct kvm
This patch does (1) as it is slightly safer, (2) would avoid one level
of indirection and is a nice follow up.
The vcpus array is going to be dynamic and might take several pages, which
is why it is allocated with kvm_kvzalloc().
Generic users of KVM_MAX_VCPUS are switched to kvm->max_vcpus as the
array size is going to change.
Signed-off-by: Radim Krčmář <rkrcmar@xxxxxxxxxx>
---
include/linux/kvm_host.h | 8 ++++++--
virt/kvm/kvm_main.c | 23 +++++++++++++++++++----
2 files changed, 25 insertions(+), 6 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ae4e114cb7d1..6ba7bc831094 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -377,16 +377,20 @@ struct kvm {
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ struct kvm_vcpu **vcpus;
/*
* created_vcpus is protected by kvm->lock, and is incremented
* at the beginning of KVM_CREATE_VCPU. online_vcpus is only
* incremented after storing the kvm_vcpu pointer in vcpus,
* and is accessed atomically.
+ * max_vcpus is the size of vcpus array and can be changed only before
+ * any vcpu is created. Updates to max_vcpus are protected by
+ * kvm->lock.
*/
atomic_t online_vcpus;
int created_vcpus;
+ int max_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
@@ -480,7 +484,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
if (id < 0)
return NULL;
- if (id < KVM_MAX_VCPUS)
+ if (id < kvm->max_vcpus)
vcpu = kvm_get_vcpu(kvm, id);
if (vcpu && vcpu->vcpu_id == id)
return vcpu;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f03b093abffe..0f1579f118b4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -604,20 +604,35 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
return 0;
}
-static inline struct kvm *kvm_alloc_vm(void)
+static inline struct kvm *kvm_alloc_vm(size_t max_vcpus)
{
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ struct kvm *kvm;
+
+ kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ if (!kvm)
+ return NULL;
+
+ kvm->vcpus = kvm_kvzalloc(sizeof(*kvm->vcpus) * max_vcpus);
+ if (!kvm->vcpus) {
+ kfree(kvm);
+ return NULL;
+ }
+ kvm->max_vcpus = max_vcpus;
+
+ return kvm;
}
static inline void kvm_free_vm(struct kvm *kvm)
{
+ if (kvm)
+ kvfree(kvm->vcpus);
kfree(kvm);
}
static struct kvm *kvm_create_vm(unsigned long type)
{
int r, i;
- struct kvm *kvm = kvm_alloc_vm();
+ struct kvm *kvm = kvm_alloc_vm(KVM_MAX_VCPUS);
if (!kvm)
return ERR_PTR(-ENOMEM);
@@ -2445,7 +2460,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
return -EINVAL;
mutex_lock(&kvm->lock);
- if (kvm->created_vcpus == KVM_MAX_VCPUS) {
+ if (kvm->created_vcpus == kvm->max_vcpus) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
--
2.12.0