[PATCH 05/11] KVM: PPC: Book3S HV: Adjust nine checks for null pointers
From: Markus Elfring <elfring@xxxxxxxxxxxxxxxxxxxxx>
Date: Fri, 20 Jan 2017 11:25:48 +0100
The script "checkpatch.pl" pointed information out like the following.
Comparison to NULL could be written â
Thus fix affected source code places.
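The conversion is purely mechanical and does not change behaviour. Using the
first hunk below as an illustration, the pattern is:

	/* before */
	if (va == NULL)
		return H_PARAMETER;

	/* after */
	if (!va)
		return H_PARAMETER;

The "!= NULL" test in kvmppc_alloc_host_rm_ops() becomes a plain truth test in
the same way.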
Signed-off-by: Markus Elfring <elfring@xxxxxxxxxxxxxxxxxxxxx>
---
arch/powerpc/kvm/book3s_hv.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index cfc7699d05df..3122998f6a32 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -458,7 +458,7 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 
 		/* convert logical addr to kernel addr and read length */
 		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
-		if (va == NULL)
+		if (!va)
 			return H_PARAMETER;
 		if (subfunc == H_VPA_REG_VPA)
 			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
@@ -1591,8 +1591,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	struct kvmppc_vcore *vcore;
 
 	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
-
-	if (vcore == NULL)
+	if (!vcore)
 		return NULL;
 
 	spin_lock_init(&vcore->lock);
@@ -2221,7 +2220,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 		prepare_threads(pvc);
 		if (!pvc->n_runnable) {
 			list_del_init(&pvc->preempt_list);
-			if (pvc->runner == NULL) {
+			if (!pvc->runner) {
 				pvc->vcore_state = VCORE_INACTIVE;
 				kvmppc_core_end_stolen(pvc);
 			}
@@ -2287,7 +2286,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 	} else {
 		vc->vcore_state = VCORE_INACTIVE;
 	}
-	if (vc->n_runnable > 0 && vc->runner == NULL) {
+	if (vc->n_runnable > 0 && !vc->runner) {
 		/* make sure there's a candidate runner awake */
 		i = -1;
 		vcpu = next_runnable_thread(vc, &i);
@@ -2786,7 +2785,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
 	       !signal_pending(current)) {
-		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+		if (vc->vcore_state == VCORE_PREEMPT && !vc->runner)
 			kvmppc_vcore_end_preempt(vc);
 
 		if (vc->vcore_state != VCORE_INACTIVE) {
@@ -2833,7 +2832,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	     vc->vcore_state == VCORE_PIGGYBACK))
 		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
 
-	if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+	if (vc->vcore_state == VCORE_PREEMPT && !vc->runner)
 		kvmppc_vcore_end_preempt(vc);
 
 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
@@ -3203,7 +3202,7 @@ void kvmppc_alloc_host_rm_ops(void)
 	int size;
 
 	/* Not the first time here ? */
-	if (kvmppc_host_rm_ops_hv != NULL)
+	if (kvmppc_host_rm_ops_hv)
 		return;
 
 	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
@@ -3430,10 +3429,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	mutex_lock(&kvm->lock);
 
 	pimap = kvm->arch.pimap;
-	if (pimap == NULL) {
+	if (!pimap) {
 		/* First call, allocate structure to hold IRQ map */
 		pimap = kvmppc_alloc_pimap();
-		if (pimap == NULL) {
+		if (!pimap) {
 			mutex_unlock(&kvm->lock);
 			return -ENOMEM;
 		}
--
2.11.0