[PATCH v4 15/26] KVM: kvm_arch.c: Remove _nolock postfix

From: isaku.yamahata
Date: Thu Sep 08 2022 - 19:27:22 EST


From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Now that all related callbacks are called under kvm_lock, the _nolock
postfix no longer conveys anything. Remove it so the function names are
shorter and easier to read.
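
For context, a minimal sketch of the calling convention this rename relies
on. kvm_add_vm() below is a hypothetical stand-in for the generic code that
invokes the kvm_arch_add_vm() hook; kvm_lock and kvm_usage_count are the
existing KVM globals. Only the locking pattern is the point:

	/*
	 * Generic side (illustrative only, not part of this patch): the
	 * arch hook, and therefore hardware_enable(), is reached only
	 * with kvm_lock already held.
	 */
	static int kvm_add_vm(struct kvm *kvm)
	{
		int r;

		mutex_lock(&kvm_lock);
		kvm_usage_count++;
		/* Runs on_each_cpu(hardware_enable, ...) under kvm_lock. */
		r = kvm_arch_add_vm(kvm, kvm_usage_count);
		if (r)
			kvm_usage_count--;
		mutex_unlock(&kvm_lock);
		return r;
	}

Since every path into hardware_enable()/hardware_disable() is serialized by
the caller this way, a _nolock marker on the callee only suggests a lockless
variant that does not exist.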

Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
virt/kvm/kvm_arch.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/virt/kvm/kvm_arch.c b/virt/kvm/kvm_arch.c
index 32befdbf7d6e..4fe16e8ef2e5 100644
--- a/virt/kvm/kvm_arch.c
+++ b/virt/kvm/kvm_arch.c
@@ -20,7 +20,7 @@ __weak int kvm_arch_post_init_vm(struct kvm *kvm)
return 0;
}

-static void hardware_enable_nolock(void *caller_name)
+static void hardware_enable(void *caller_name)
{
int cpu = raw_smp_processor_id();
int r;
@@ -42,7 +42,7 @@ static void hardware_enable_nolock(void *caller_name)
}
}

-static void hardware_disable_nolock(void *junk)
+static void hardware_disable(void *junk)
{
int cpu = raw_smp_processor_id();

@@ -56,7 +56,7 @@ static void hardware_disable_nolock(void *junk)

__weak void kvm_arch_pre_hardware_unsetup(void)
{
- on_each_cpu(hardware_disable_nolock, NULL, 1);
+ on_each_cpu(hardware_disable, NULL, 1);
}

/*
@@ -71,7 +71,7 @@ __weak int kvm_arch_add_vm(struct kvm *kvm, int usage_count)
return 0;

atomic_set(&hardware_enable_failed, 0);
- on_each_cpu(hardware_enable_nolock, (void *)__func__, 1);
+ on_each_cpu(hardware_enable, (void *)__func__, 1);

if (atomic_read(&hardware_enable_failed)) {
r = -EBUSY;
@@ -81,7 +81,7 @@ __weak int kvm_arch_add_vm(struct kvm *kvm, int usage_count)
r = kvm_arch_post_init_vm(kvm);
err:
if (r)
- on_each_cpu(hardware_disable_nolock, NULL, 1);
+ on_each_cpu(hardware_disable, NULL, 1);
return r;
}

@@ -90,7 +90,7 @@ __weak int kvm_arch_del_vm(int usage_count)
if (usage_count)
return 0;

- on_each_cpu(hardware_disable_nolock, NULL, 1);
+ on_each_cpu(hardware_disable, NULL, 1);
return 0;
}

@@ -116,7 +116,7 @@ __weak int kvm_arch_online_cpu(unsigned int cpu, int usage_count)
* preemption until all arch callbacks are fixed.
*/
preempt_disable();
- hardware_enable_nolock((void *)__func__);
+ hardware_enable((void *)__func__);
preempt_enable();
if (atomic_read(&hardware_enable_failed)) {
atomic_set(&hardware_enable_failed, 0);
@@ -135,7 +135,7 @@ __weak int kvm_arch_offline_cpu(unsigned int cpu, int usage_count)
* preemption until all arch callbacks are fixed.
*/
preempt_disable();
- hardware_disable_nolock(NULL);
+ hardware_disable(NULL);
preempt_enable();
}
return 0;
@@ -143,7 +143,7 @@ __weak int kvm_arch_offline_cpu(unsigned int cpu, int usage_count)

__weak int kvm_arch_reboot(int val)
{
- on_each_cpu(hardware_disable_nolock, NULL, 1);
+ on_each_cpu(hardware_disable, NULL, 1);
return NOTIFY_OK;
}

@@ -151,7 +151,7 @@ __weak int kvm_arch_suspend(int usage_count)
{
if (usage_count) {
preempt_disable();
- hardware_disable_nolock(NULL);
+ hardware_disable(NULL);
preempt_enable();
}
return 0;
@@ -168,7 +168,7 @@ __weak void kvm_arch_resume(int usage_count)

if (usage_count) {
preempt_disable();
- hardware_enable_nolock((void *)__func__);
+ hardware_enable((void *)__func__);
preempt_enable();
}
}
--
2.25.1