Re: [PATCH v2] KVM: Optimize kvm_arch_vcpu_ioctl_run function

From: Tianjia Zhang
Date: Thu Apr 16 2020 - 04:54:35 EST




On 2020/4/16 16:28, Marc Zyngier wrote:
On 2020-04-16 08:03, Vitaly Kuznetsov wrote:
Tianjia Zhang <tianjia.zhang@xxxxxxxxxxxxxxxxx> writes:

In earlier versions of KVM, 'kvm_run' was an independent structure
and was not reachable from the vcpu structure. Nowadays 'kvm_run'
is already accessible through the vcpu structure (vcpu->run), so the
separate 'kvm_run' parameter is redundant.

This patch simplifies the function definition by removing the extra
'kvm_run' parameter and extracting it from the 'kvm_vcpu' structure
where necessary.
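
For context, a minimal sketch of the relationship this relies on (fields
abridged from 'struct kvm_vcpu' in include/linux/kvm_host.h):

    struct kvm_vcpu {
        struct kvm *kvm;
        /* ... */
        struct kvm_run *run;    /* run area shared with userspace */
        /* ... */
    };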

Signed-off-by: Tianjia Zhang <tianjia.zhang@xxxxxxxxxxxxxxxxx>
---

v2 change:
 remove 'kvm_run' parameter and extract it from 'kvm_vcpu'

 arch/mips/kvm/mips.c       |  3 ++-
 arch/powerpc/kvm/powerpc.c |  3 ++-
 arch/s390/kvm/kvm-s390.c   |  3 ++-
 arch/x86/kvm/x86.c         | 11 ++++++-----
 include/linux/kvm_host.h   |  2 +-
 virt/kvm/arm/arm.c         |  6 +++---
 virt/kvm/kvm_main.c        |  2 +-
 7 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 8f05dd0a0f4e..ec24adf4857e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -439,8 +439,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
     return -ENOIOCTLCMD;
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *run = vcpu->run;
     int r = -EINTR;

     vcpu_load(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index e15166b0a16d..7e24691e138a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1764,8 +1764,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
     return r;
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *run = vcpu->run;
     int r;

     vcpu_load(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 19a81024fe16..443af3ead739 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4333,8 +4333,9 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         store_regs_fmt2(vcpu, kvm_run);
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *kvm_run = vcpu->run;
     int rc;

     if (kvm_run->immediate_exit)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3bf2ecafd027..a0338e86c90f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8707,8 +8707,9 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
     trace_kvm_fpu(0);
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *kvm_run = vcpu->run;
     int r;

     vcpu_load(vcpu);
@@ -8726,18 +8727,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         r = -EAGAIN;
         if (signal_pending(current)) {
             r = -EINTR;
-            vcpu->run->exit_reason = KVM_EXIT_INTR;
+            kvm_run->exit_reason = KVM_EXIT_INTR;
             ++vcpu->stat.signal_exits;
         }
         goto out;
     }

-    if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
+    if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
         r = -EINVAL;
         goto out;
     }

-    if (vcpu->run->kvm_dirty_regs) {
+    if (kvm_run->kvm_dirty_regs) {
         r = sync_regs(vcpu);
         if (r != 0)
             goto out;
@@ -8767,7 +8768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

 out:
     kvm_put_guest_fpu(vcpu);
-    if (vcpu->run->kvm_valid_regs)
+    if (kvm_run->kvm_valid_regs)
         store_regs(vcpu);
     post_kvm_run_save(vcpu);
     kvm_sigset_deactivate(vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d58beb65454..1e17ef719595 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -866,7 +866,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                     struct kvm_mp_state *mp_state);
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                     struct kvm_guest_debug *dbg);
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

 int kvm_arch_init(void *opaque);
 void kvm_arch_exit(void);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 48d0ec44ad77..f5390ac2165b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -639,7 +639,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 /**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
- * @run:    The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
@@ -647,8 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *run = vcpu->run;
     int ret;

     if (unlikely(!kvm_vcpu_initialized(vcpu)))
@@ -659,7 +659,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         return ret;

     if (run->exit_reason == KVM_EXIT_MMIO) {
-        ret = kvm_handle_mmio_return(vcpu, vcpu->run);
+        ret = kvm_handle_mmio_return(vcpu, run);

I don't know much about ARM, but this also seems redundant:
kvm_handle_mmio_return() is also able to extract 'struct kvm_run' from
'struct kvm_vcpu'. This likely deserves its own patch, though.

Something like this (untested):

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 32c8a675e5a4..82978995bdd6 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -490,7 +490,7 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
          phys_addr_t fault_ipa);

diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index 4e0366759726..3b2c822b4859 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -77,9 +77,8 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
 * or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
- * @run:  The VCPU run struct containing the mmio data
 */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
 {
     unsigned long data;
     unsigned int len;
@@ -93,7 +92,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)

     if (!kvm_vcpu_dabt_iswrite(vcpu)) {
         len = kvm_vcpu_dabt_get_as(vcpu);
-        data = kvm_mmio_read_buf(run->mmio.data, len);
+        data = kvm_mmio_read_buf(vcpu->run->mmio.data, len);

         if (kvm_vcpu_dabt_issext(vcpu) &&
             len < sizeof(unsigned long)) {
@@ -104,7 +103,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
         if (!kvm_vcpu_dabt_issf(vcpu))
             data = data & 0xffffffff;

-        trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+        trace_kvm_mmio(KVM_TRACE_MMIO_READ, len,
+                   vcpu->run->mmio.phys_addr,
                    &data);
         data = vcpu_data_host_to_guest(vcpu, data, len);
         vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);

Overall, there is a large set of cleanups to be done when both the vcpu and the run
structures are passed as parameters at the same time. Just grepping the tree for
kvm_run is pretty instructive.
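
As a rough illustration of the pattern (handle_foo() is a hypothetical
helper, not a function from the tree), each such callee can drop the
extra parameter and fetch the run area locally:

-static int handle_foo(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_foo(struct kvm_vcpu *vcpu)
 {
+    struct kvm_run *run = vcpu->run;
     /* existing uses of run->... stay unchanged */
     ...
 }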

        M.

Sorry, that was my mistake; I only compiled for the x86 platform. I will submit the patch again.

Thanks,
Tianjia