Re: [PATCH 3/6] drm/gem: use new ww_mutex_(un)lock_for_each macros
From: Peter Zijlstra
Date: Fri Jun 14 2019 - 09:24:28 EST
On Fri, Jun 14, 2019 at 02:41:22PM +0200, Christian König wrote:
> Use the provided macros instead of implementing deadlock handling on our own.
>
> Signed-off-by: Christian König <christian.koenig@xxxxxxx>
> ---
> drivers/gpu/drm/drm_gem.c | 49 ++++++++++-----------------------------
> 1 file changed, 12 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 50de138c89e0..6e4623d3bee2 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -1307,51 +1307,26 @@ int
>  drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
>                            struct ww_acquire_ctx *acquire_ctx)
>  {
> -        int contended = -1;
> +        struct ww_mutex *contended;
>          int i, ret;
>
>          ww_acquire_init(acquire_ctx, &reservation_ww_class);
>
> -retry:
> -        if (contended != -1) {
> -                struct drm_gem_object *obj = objs[contended];
> -
> -                ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
> -                                                       acquire_ctx);
> -                if (ret) {
> -                        ww_acquire_done(acquire_ctx);
> -                        return ret;
> -                }
> -        }
> -
> -        for (i = 0; i < count; i++) {
> -                if (i == contended)
> -                        continue;
> -
> -                ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
> -                                                  acquire_ctx);
> -                if (ret) {
> -                        int j;
> -
> -                        for (j = 0; j < i; j++)
> -                                ww_mutex_unlock(&objs[j]->resv->lock);
> -
> -                        if (contended != -1 && contended >= i)
> -                                ww_mutex_unlock(&objs[contended]->resv->lock);
> -
> -                        if (ret == -EDEADLK) {
> -                                contended = i;
> -                                goto retry;
> -                        }
> -
> -                        ww_acquire_done(acquire_ctx);
> -                        return ret;
> -                }
> -        }

I note that all the sites you use this on are simple index iterators; so
how about something like this:

void ww_mutex_unlock_all(int count, void *data, struct ww_mutex *(*func)(int, void *))
{
        struct ww_mutex *lock;
        int i;

        for (i = 0; i < count; i++) {
                lock = func(i, data);
                ww_mutex_unlock(lock);
        }
}

int ww_mutex_lock_all(int count, struct ww_acquire_ctx *acquire_ctx, bool intr,
                      void *data, struct ww_mutex *(*func)(int, void *))
{
        int i, ret, contended = -1;
        struct ww_mutex *lock;

retry:
        if (contended != -1) {
                /* Retake the lock we previously backed off on, via the slow path. */
                lock = func(contended, data);
                if (intr) {
                        ret = ww_mutex_lock_slow_interruptible(lock, acquire_ctx);
                } else {
                        ww_mutex_lock_slow(lock, acquire_ctx);
                        ret = 0;
                }
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                lock = func(i, data);
                if (intr)
                        ret = ww_mutex_lock_interruptible(lock, acquire_ctx);
                else
                        ret = ww_mutex_lock(lock, acquire_ctx);

                if (ret) {
                        /* Back off: release everything we hold so far. */
                        ww_mutex_unlock_all(i, data, func);
                        if (contended > i) {
                                lock = func(contended, data);
                                ww_mutex_unlock(lock);
                        }

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);
        return 0;
}

> +        ww_mutex_lock_for_each(for (i = 0; i < count; i++),
> +                               &objs[i]->resv->lock, contended, ret, true,
> +                               acquire_ctx)
> +                if (ret)
> +                        goto error;
which then becomes:

struct ww_mutex *gem_ww_mutex_func(int i, void *data)
{
        struct drm_gem_object **objs = data;

        return &objs[i]->resv->lock;
}

        ret = ww_mutex_lock_all(count, acquire_ctx, true, objs, gem_ww_mutex_func);
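
And the unlock side would presumably shrink the same way; a rough sketch only,
assuming drm_gem_unlock_reservations() keeps its current shape and reuses the
callback above:

        /* body of drm_gem_unlock_reservations() */
        ww_mutex_unlock_all(count, objs, gem_ww_mutex_func);
        ww_acquire_fini(acquire_ctx);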
>          ww_acquire_done(acquire_ctx);
>
>          return 0;
> +
> +error:
> +        ww_mutex_unlock_for_each(for (i = 0; i < count; i++),
> +                                 &objs[i]->resv->lock, contended);
> +        ww_acquire_done(acquire_ctx);
> +        return ret;
>  }
>  EXPORT_SYMBOL(drm_gem_lock_reservations);
>
> --
> 2.17.1
>