[PATCH 2/3] dma-buf: add dma_resv_get_singleton (v2)

From: Jason Ekstrand
Date: Tue Mar 10 2020 - 23:44:21 EST


From: Christian König <ckoenig.leichtzumerken@xxxxxxxxx>

Add a helper function to get a single fence representing
all fences in a dma_resv object.

This fence is either the only one in the object or all
unsignaled fences of the object flattened into a dma_fence_array.

v2 (Jason Ekstrand):
- Take reference of fences both for creating the dma_fence_array and in
the case where we return one fence.
- Handle the case where dma_resv_get_list() returns NULL

Signed-off-by: Jason Ekstrand <jason@xxxxxxxxxxxxxx>
---
drivers/dma-buf/dma-resv.c | 118 +++++++++++++++++++++++++++++++++++++
include/linux/dma-resv.h | 3 +
2 files changed, 121 insertions(+)

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 4264e64788c4..66591d8ab7ef 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -33,6 +33,8 @@
*/

#include <linux/dma-resv.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/sched/mm.h>

@@ -47,6 +49,19 @@
* write-side updates.
*/

+/**
+ * dma_fence_deep_dive_for_each - deep dive into the fence containers
+ * @fence: resulting fence
+ * @chain: variable for a dma_fence_chain
+ * @index: index into a dma_fence_array
+ * @head: starting point
+ *
+ * Helper to deep dive into the fence containers for flattening them.
+ */
+#define dma_fence_deep_dive_for_each(fence, chain, index, head) \
+ dma_fence_chain_for_each(chain, head) \
+ dma_fence_array_for_each(fence, index, chain)
+
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

@@ -516,6 +531,109 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);

+/**
+ * dma_resv_get_singleton - get a single fence for the dma_resv object
+ * @obj: the reservation object
+ * @extra: extra fence to add to the resulting array
+ * @result: resulting dma_fence
+ *
+ * Get a single fence representing all unsignaled fences in the dma_resv object
+ * plus the given extra fence. If we get only one fence, return a new
+ * reference to it; otherwise return a dma_fence_array object.
+ *
+ * RETURNS
+ * Returns -ENOMEM if allocations fail, zero otherwise.
+ */
+int dma_resv_get_singleton(struct dma_resv *obj, struct dma_fence *extra,
+ struct dma_fence **result)
+{
+ struct dma_resv_list *fobj = dma_resv_get_list(obj);
+ struct dma_fence *excl = dma_resv_get_excl(obj);
+ struct dma_fence *fence, *chain, **fences;
+ struct dma_fence_array *array;
+ unsigned int num_fences, shared_count;
+ unsigned int i, j;
+
+ num_fences = 0;
+ *result = NULL;
+
+ dma_fence_deep_dive_for_each(fence, chain, i, extra) {
+ if (dma_fence_is_signaled(fence))
+ continue;
+
+ *result = fence;
+ ++num_fences;
+ }
+
+ dma_fence_deep_dive_for_each(fence, chain, i, excl) {
+ if (dma_fence_is_signaled(fence))
+ continue;
+
+ *result = fence;
+ ++num_fences;
+ }
+
+ shared_count = fobj ? fobj->shared_count : 0;
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(obj));
+ dma_fence_deep_dive_for_each(fence, chain, j, f) {
+ if (dma_fence_is_signaled(fence))
+ continue;
+
+ *result = fence;
+ ++num_fences;
+ }
+ }
+
+ if (num_fences <= 1) {
+ *result = dma_fence_get(*result);
+ return 0;
+ }
+
+ fences = kmalloc_array(num_fences, sizeof(struct dma_fence*),
+ GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ num_fences = 0;
+
+ dma_fence_deep_dive_for_each(fence, chain, i, extra)
+ if (!dma_fence_is_signaled(fence))
+ fences[num_fences++] = dma_fence_get(fence);
+
+ dma_fence_deep_dive_for_each(fence, chain, i, excl)
+ if (!dma_fence_is_signaled(fence))
+ fences[num_fences++] = dma_fence_get(fence);
+
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(obj));
+ dma_fence_deep_dive_for_each(fence, chain, j, f)
+ if (!dma_fence_is_signaled(fence))
+ fences[num_fences++] = dma_fence_get(fence);
+ }
+
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array)
+ goto error_free;
+
+ *result = &array->base;
+ return 0;
+
+error_free:
+ while (num_fences--)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+ return -ENOMEM;
+}
+
/**
* dma_resv_wait_timeout_rcu - Wait on reservation's objects
* shared and/or exclusive fences.
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index ee50d10f052b..d50e753e4550 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -287,6 +287,9 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,

int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);

+int dma_resv_get_singleton(struct dma_resv *obj, struct dma_fence *extra,
+ struct dma_fence **result);
+
long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
unsigned long timeout);

--
2.24.1