On Wed, May 18, 2016 at 05:59:52PM -0300, Gustavo Padovan wrote:
+static void collection_check_cb_func(struct fence *fence, struct fence_cb *cb)
We don't always have a convenient means to preallocate an array of
+{
+ struct fence_collection_cb *f_cb;
+ struct fence_collection *collection;
+
+ /* Recover the per-fence callback wrapper, then its owning collection. */
+ f_cb = container_of(cb, struct fence_collection_cb, cb);
+ collection = f_cb->collection;
+
+ /* Signal the collection once the last pending member fence has fired. */
+ if (atomic_dec_and_test(&collection->num_pending_fences))
+ fence_signal(&collection->base);
+}
+
+/*
+ * Register collection_check_cb_func on every fence in the collection's
+ * array.  When fence_add_callback() returns non-zero the callback will
+ * never run for that fence, so its contribution to the pending count is
+ * dropped immediately.
+ * NOTE(review): this assumes a failed add means the fence has already
+ * signaled — confirm against fence_add_callback()'s contract.
+ */
+static bool fence_collection_enable_signaling(struct fence *fence)
+{
+ struct fence_collection *collection = to_fence_collection(fence);
+ int i;
+
+ for (i = 0 ; i < collection->num_fences ; i++) {
+ if (fence_add_callback(collection->fences[i].fence,
+ &collection->fences[i].cb,
+ collection_check_cb_func)) {
+ atomic_dec(&collection->num_pending_fences);
+ }
+ }
fences to use. Keeping a list of fences in addition to the array would
be easier to use in many circumstances.
Just means we need a
/*
 * One list node per member fence: carries the fence reference taken at
 * add time plus its link in the collection's fence_list.
 */
struct fence_collection_entry {
struct fence *fence; /* holds the ref taken via fence_get() */
struct list_head link; /* entry in fence_collection.fence_list */
};
/*
 * Append @fence to @_collection's fence_list, taking a reference to it
 * and bumping the collection's pending count.
 * Returns 0 on success, -ENOMEM if the list node cannot be allocated.
 * NOTE(review): only safe from the constructing thread before the
 * collection is sealed for export/use — no locking is taken here.
 */
int fence_collection_add(struct fence *_collection,
struct fence *fence)
{
struct fence_collection *collection =
to_fence_collection(_collection);
struct fence_collection_entry *entry;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
/* The collection keeps its own reference for the entry's lifetime. */
entry->fence = fence_get(fence);
list_add_tail(&entry->link, &collection->fence_list);
atomic_inc(&collection->num_pending_fences);
return 0;
}
and a couple of list iterations as well as walking the arrays.
(This fence_collection_add() needs to be documented to only be valid
from the constructing thread before the fence is sealed for export/use.)
-Chris