Check early if we need to allocate the bulk waiter. This helps to
improve readability and reduces the indentation of the 'if (waiter)'
conditional block.
No functional changes intended in this patch.
Signed-off-by: Umang Jain <umang.jain@xxxxxxxxxxxxxxxx>
---
.../interface/vchiq_arm/vchiq_arm.c | 34 +++++++++----------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 27ceaac8f6cc..a4a7f31b124a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -564,28 +564,28 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
}
mutex_unlock(&instance->bulk_waiter_list_mutex);
- if (waiter) {
- struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
-
- if (bulk) {
- /* This thread has an outstanding bulk transfer. */
- /* FIXME: why compare a dma address to a pointer? */
- if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
- /*
- * This is not a retry of the previous one.
- * Cancel the signal when the transfer completes.
- */
- spin_lock(&service->state->bulk_waiter_spinlock);
- bulk->userdata = NULL;
- spin_unlock(&service->state->bulk_waiter_spinlock);
- }
- }
- } else {
+ if (!waiter) {
waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter)
return -ENOMEM;
}
+ struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
+
+ if (bulk) {
+ /* This thread has an outstanding bulk transfer. */
+ /* FIXME: why compare a dma address to a pointer? */
+ if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
+ /*
+ * This is not a retry of the previous one.
+ * Cancel the signal when the transfer completes.
+ */
+ spin_lock(&service->state->bulk_waiter_spinlock);
+ bulk->userdata = NULL;
+ spin_unlock(&service->state->bulk_waiter_spinlock);
+ }
+ }
+
ret = vchiq_bulk_xfer_blocking(instance, handle, data, NULL, size,
&waiter->bulk_waiter, dir);
if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {