Home | History | Annotate | Line # | Download | only in drm
drm_modeset_lock.c revision 1.4
      1 /*	$NetBSD: drm_modeset_lock.c,v 1.4 2020/02/14 14:34:57 maya Exp $	*/
      2 
      3 /*
      4  * Copyright (C) 2014 Red Hat
      5  * Author: Rob Clark <robdclark (at) gmail.com>
      6  *
      7  * Permission is hereby granted, free of charge, to any person obtaining a
      8  * copy of this software and associated documentation files (the "Software"),
      9  * to deal in the Software without restriction, including without limitation
     10  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     11  * and/or sell copies of the Software, and to permit persons to whom the
     12  * Software is furnished to do so, subject to the following conditions:
     13  *
     14  * The above copyright notice and this permission notice shall be included in
     15  * all copies or substantial portions of the Software.
     16  *
     17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     23  * OTHER DEALINGS IN THE SOFTWARE.
     24  */
     25 
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: drm_modeset_lock.c,v 1.4 2020/02/14 14:34:57 maya Exp $");
     28 
     29 #include <drm/drmP.h>
     30 #include <drm/drm_crtc.h>
     31 #include <drm/drm_modeset_lock.h>
     32 
     33 /**
     34  * DOC: kms locking
     35  *
     36  * As KMS moves toward more fine grained locking, and atomic ioctl where
     37  * userspace can indirectly control locking order, it becomes necessary
     38  * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
     39  * the locking is more distributed around the driver code, we want a bit
     40  * of extra utility/tracking out of our acquire-ctx.  This is provided
     41  * by drm_modeset_lock / drm_modeset_acquire_ctx.
     42  *
     43  * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
     44  *
     45  * The basic usage pattern is to:
     46  *
     47  *     drm_modeset_acquire_init(&ctx)
     48  *   retry:
     49  *     foreach (lock in random_ordered_set_of_locks) {
     50  *       ret = drm_modeset_lock(lock, &ctx)
     51  *       if (ret == -EDEADLK) {
     52  *          drm_modeset_backoff(&ctx);
     53  *          goto retry;
     54  *       }
     55  *     }
     56  *
     57  *     ... do stuff ...
     58  *
     59  *     drm_modeset_drop_locks(&ctx);
     60  *     drm_modeset_acquire_fini(&ctx);
     61  */
     62 
     63 /**
     64  * drm_modeset_lock_all - take all modeset locks
     65  * @dev: drm device
     66  *
     67  * This function takes all modeset locks, suitable where a more fine-grained
     68  * scheme isn't (yet) implemented. Locks must be dropped with
     69  * drm_modeset_unlock_all.
     70  */
     71 void drm_modeset_lock_all(struct drm_device *dev)
     72 {
     73 	struct drm_mode_config *config = &dev->mode_config;
     74 	struct drm_modeset_acquire_ctx *ctx;
     75 	int ret;
     76 
     77 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
     78 	if (WARN_ON(!ctx))
     79 		return;
     80 
     81 	mutex_lock(&config->mutex);
     82 
     83 	drm_modeset_acquire_init(ctx, 0);
     84 
     85 retry:
     86 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
     87 	if (ret)
     88 		goto fail;
     89 	ret = drm_modeset_lock_all_crtcs(dev, ctx);
     90 	if (ret)
     91 		goto fail;
     92 
     93 	WARN_ON(config->acquire_ctx);
     94 
     95 	/* now we hold the locks, so now that it is safe, stash the
     96 	 * ctx for drm_modeset_unlock_all():
     97 	 */
     98 	config->acquire_ctx = ctx;
     99 
    100 	drm_warn_on_modeset_not_all_locked(dev);
    101 
    102 	return;
    103 
    104 fail:
    105 	if (ret == -EDEADLK) {
    106 		drm_modeset_backoff(ctx);
    107 		goto retry;
    108 	}
    109 
    110 	kfree(ctx);
    111 }
    112 EXPORT_SYMBOL(drm_modeset_lock_all);
    113 
    114 /**
    115  * drm_modeset_unlock_all - drop all modeset locks
    116  * @dev: device
    117  *
    118  * This function drop all modeset locks taken by drm_modeset_lock_all.
    119  */
    120 void drm_modeset_unlock_all(struct drm_device *dev)
    121 {
    122 	struct drm_mode_config *config = &dev->mode_config;
    123 	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
    124 
    125 	if (WARN_ON(!ctx))
    126 		return;
    127 
    128 	config->acquire_ctx = NULL;
    129 	drm_modeset_drop_locks(ctx);
    130 	drm_modeset_acquire_fini(ctx);
    131 
    132 	kfree(ctx);
    133 
    134 	mutex_unlock(&dev->mode_config.mutex);
    135 }
    136 EXPORT_SYMBOL(drm_modeset_unlock_all);
    137 
    138 /**
    139  * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
    140  * @crtc: DRM CRTC
    141  * @plane: DRM plane to be updated on @crtc
    142  *
    143  * This function locks the given crtc and plane (which should be either the
    144  * primary or cursor plane) using a hidden acquire context. This is necessary so
    145  * that drivers internally using the atomic interfaces can grab further locks
    146  * with the lock acquire context.
    147  *
    148  * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
    149  * converted to universal planes yet.
    150  */
    151 void drm_modeset_lock_crtc(struct drm_crtc *crtc,
    152 			   struct drm_plane *plane)
    153 {
    154 	struct drm_modeset_acquire_ctx *ctx;
    155 	int ret;
    156 
    157 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    158 	if (WARN_ON(!ctx))
    159 		return;
    160 
    161 	drm_modeset_acquire_init(ctx, 0);
    162 
    163 retry:
    164 	ret = drm_modeset_lock(&crtc->mutex, ctx);
    165 	if (ret)
    166 		goto fail;
    167 
    168 	if (plane) {
    169 		ret = drm_modeset_lock(&plane->mutex, ctx);
    170 		if (ret)
    171 			goto fail;
    172 
    173 		if (plane->crtc) {
    174 			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
    175 			if (ret)
    176 				goto fail;
    177 		}
    178 	}
    179 
    180 	WARN_ON(crtc->acquire_ctx);
    181 
    182 	/* now we hold the locks, so now that it is safe, stash the
    183 	 * ctx for drm_modeset_unlock_crtc():
    184 	 */
    185 	crtc->acquire_ctx = ctx;
    186 
    187 	return;
    188 
    189 fail:
    190 	if (ret == -EDEADLK) {
    191 		drm_modeset_backoff(ctx);
    192 		goto retry;
    193 	}
    194 }
    195 EXPORT_SYMBOL(drm_modeset_lock_crtc);
    196 
    197 /**
    198  * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
    199  * @crtc: drm crtc
    200  *
    201  * Legacy ioctl operations like cursor updates or page flips only have per-crtc
    202  * locking, and store the acquire ctx in the corresponding crtc. All other
    203  * legacy operations take all locks and use a global acquire context. This
    204  * function grabs the right one.
    205  */
    206 struct drm_modeset_acquire_ctx *
    207 drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
    208 {
    209 	if (crtc->acquire_ctx)
    210 		return crtc->acquire_ctx;
    211 
    212 	WARN_ON(!crtc->dev->mode_config.acquire_ctx);
    213 
    214 	return crtc->dev->mode_config.acquire_ctx;
    215 }
    216 EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
    217 
    218 /**
    219  * drm_modeset_unlock_crtc - drop crtc lock
    220  * @crtc: drm crtc
    221  *
    222  * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
    223  * locks acquired through the hidden context.
    224  */
    225 void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
    226 {
    227 	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
    228 
    229 	if (WARN_ON(!ctx))
    230 		return;
    231 
    232 	crtc->acquire_ctx = NULL;
    233 	drm_modeset_drop_locks(ctx);
    234 	drm_modeset_acquire_fini(ctx);
    235 
    236 	kfree(ctx);
    237 }
    238 EXPORT_SYMBOL(drm_modeset_unlock_crtc);
    239 
    240 /**
    241  * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
    242  * @dev: device
    243  *
    244  * Useful as a debug assert.
    245  */
    246 void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
    247 {
    248 	struct drm_crtc *crtc;
    249 
    250 	/* Locking is currently fubar in the panic handler. */
    251 	if (oops_in_progress)
    252 		return;
    253 
    254 	drm_for_each_crtc(crtc, dev)
    255 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
    256 
    257 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
    258 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
    259 }
    260 EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
    261 
    262 /**
    263  * drm_modeset_acquire_init - initialize acquire context
    264  * @ctx: the acquire context
    265  * @flags: for future
    266  */
    267 void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
    268 		uint32_t flags)
    269 {
    270 	memset(ctx, 0, sizeof(*ctx));
    271 	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
    272 	INIT_LIST_HEAD(&ctx->locked);
    273 }
    274 EXPORT_SYMBOL(drm_modeset_acquire_init);
    275 
    276 /**
    277  * drm_modeset_acquire_fini - cleanup acquire context
    278  * @ctx: the acquire context
    279  */
    280 void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
    281 {
    282 	ww_acquire_fini(&ctx->ww_ctx);
    283 }
    284 EXPORT_SYMBOL(drm_modeset_acquire_fini);
    285 
    286 /**
    287  * drm_modeset_drop_locks - drop all locks
    288  * @ctx: the acquire context
    289  *
    290  * Drop all locks currently held against this acquire context.
    291  */
    292 void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
    293 {
    294 	WARN_ON(ctx->contended);
    295 	while (!list_empty(&ctx->locked)) {
    296 		struct drm_modeset_lock *lock;
    297 
    298 		lock = list_first_entry(&ctx->locked,
    299 				struct drm_modeset_lock, head);
    300 
    301 		drm_modeset_unlock(lock);
    302 	}
    303 }
    304 EXPORT_SYMBOL(drm_modeset_drop_locks);
    305 
/*
 * modeset_lock - common implementation behind the drm_modeset_lock*() family
 * @lock: the ww_mutex-backed modeset lock to take
 * @ctx: acquire context tracking all locks taken so far
 * @interruptible: use the _interruptible ww_mutex variants
 * @slow: use the slow-path (uncontended-wait) variants, used after backoff
 *
 * Returns 0 on success (including when @ctx already holds @lock), -EBUSY in
 * trylock-only mode when the lock is contended, -EDEADLK when ww_mutex
 * detects a deadlock (the contended lock is recorded in ctx->contended for
 * modeset_backoff()), or -EINTR for the interruptible variants.
 */
static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	/* Caller must resolve a pending contention via backoff first. */
	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		/* Slow path cannot fail when not interruptible. */
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		/* Track the lock so drm_modeset_drop_locks() can release it;
		 * a non-empty head would mean it is already on some list. */
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		/* Remember which lock deadlocked so modeset_backoff() can
		 * drop everything and wait on it. */
		ctx->contended = lock;
	}

	return ret;
}
    347 
    348 static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
    349 		bool interruptible)
    350 {
    351 	struct drm_modeset_lock *contended = ctx->contended;
    352 
    353 	ctx->contended = NULL;
    354 
    355 	if (WARN_ON(!contended))
    356 		return 0;
    357 
    358 	drm_modeset_drop_locks(ctx);
    359 
    360 	return modeset_lock(contended, ctx, interruptible, true);
    361 }
    362 
    363 /**
    364  * drm_modeset_backoff - deadlock avoidance backoff
    365  * @ctx: the acquire context
    366  *
    367  * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
    368  * you must call this function to drop all currently held locks and
    369  * block until the contended lock becomes available.
    370  */
    371 void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
    372 {
    373 	modeset_backoff(ctx, false);
    374 }
    375 EXPORT_SYMBOL(drm_modeset_backoff);
    376 
    377 /**
    378  * drm_modeset_backoff_interruptible - deadlock avoidance backoff
    379  * @ctx: the acquire context
    380  *
    381  * Interruptible version of drm_modeset_backoff()
    382  */
    383 int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
    384 {
    385 	return modeset_backoff(ctx, true);
    386 }
    387 EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
    388 
    389 /**
    390  * drm_modeset_lock - take modeset lock
    391  * @lock: lock to take
    392  * @ctx: acquire ctx
    393  *
    394  * If ctx is not NULL, then its ww acquire context is used and the
    395  * lock will be tracked by the context and can be released by calling
    396  * drm_modeset_drop_locks().  If -EDEADLK is returned, this means a
    397  * deadlock scenario has been detected and it is an error to attempt
    398  * to take any more locks without first calling drm_modeset_backoff().
    399  */
    400 int drm_modeset_lock(struct drm_modeset_lock *lock,
    401 		struct drm_modeset_acquire_ctx *ctx)
    402 {
    403 	if (ctx)
    404 		return modeset_lock(lock, ctx, false, false);
    405 
    406 	ww_mutex_lock(&lock->mutex, NULL);
    407 	return 0;
    408 }
    409 EXPORT_SYMBOL(drm_modeset_lock);
    410 
    411 /**
    412  * drm_modeset_lock_interruptible - take modeset lock
    413  * @lock: lock to take
    414  * @ctx: acquire ctx
    415  *
    416  * Interruptible version of drm_modeset_lock()
    417  */
    418 int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
    419 		struct drm_modeset_acquire_ctx *ctx)
    420 {
    421 	if (ctx)
    422 		return modeset_lock(lock, ctx, true, false);
    423 
    424 	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
    425 }
    426 EXPORT_SYMBOL(drm_modeset_lock_interruptible);
    427 
    428 /**
    429  * drm_modeset_unlock - drop modeset lock
    430  * @lock: lock to release
    431  */
    432 void drm_modeset_unlock(struct drm_modeset_lock *lock)
    433 {
    434 	list_del_init(&lock->head);
    435 	ww_mutex_unlock(&lock->mutex);
    436 }
    437 EXPORT_SYMBOL(drm_modeset_unlock);
    438 
    439 /* In some legacy codepaths it's convenient to just grab all the crtc and plane
    440  * related locks. */
    441 int drm_modeset_lock_all_crtcs(struct drm_device *dev,
    442 		struct drm_modeset_acquire_ctx *ctx)
    443 {
    444 	struct drm_crtc *crtc;
    445 	struct drm_plane *plane;
    446 	int ret = 0;
    447 
    448 	drm_for_each_crtc(crtc, dev) {
    449 		ret = drm_modeset_lock(&crtc->mutex, ctx);
    450 		if (ret)
    451 			return ret;
    452 	}
    453 
    454 	drm_for_each_plane(plane, dev) {
    455 		ret = drm_modeset_lock(&plane->mutex, ctx);
    456 		if (ret)
    457 			return ret;
    458 	}
    459 
    460 	return 0;
    461 }
    462 EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
    463