/*	$NetBSD: ww_mutex.h,v 1.7 2014/09/15 20:24:55 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_WW_MUTEX_H_
#define _ASM_WW_MUTEX_H_

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <sys/systm.h>

#include <linux/mutex.h>

struct ww_class {
	volatile uint64_t	wwc_ticket;
};

#define	DEFINE_WW_CLASS(CLASS)						      \
	struct ww_class CLASS = {					      \
		.wwc_ticket = 0,					      \
	}

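/*
 * Illustrative usage (the class name here is hypothetical): a driver
 * defines one ww_class per family of locks that may be held together,
 * e.g.
 *
 *	DEFINE_WW_CLASS(example_ww_class);
 *
 * All ww_mutexes initialized against the same class draw tickets from
 * the same counter, which is what totally orders contending acquire
 * contexts across those locks.
 */
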
struct ww_acquire_ctx {
	struct ww_class	*wwx_class __diagused;
	struct lwp	*wwx_owner __diagused;
	uint64_t	wwx_ticket;
	unsigned	wwx_acquired;
	bool		wwx_acquire_done;
	struct rb_node	wwx_rb_node;
};

static inline int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return 1;
	return 0;
}

static inline int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return 1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

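/*
 * The waiters tree is ordered by ticket number, so RB_TREE_MIN in
 * ww_mutex_unlock yields the waiter holding the oldest ticket, i.e.
 * the highest-priority contender.
 */
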
static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

static inline void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

static inline void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}

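/*
 * Sketch of the acquire-context lifecycle, assuming hypothetical
 * objects a and b whose embedded ww_mutexes share example_ww_class:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	ww_mutex_lock(&b->lock, &ctx);	(backoff handling omitted)
 *	ww_acquire_done(&ctx);		(optional: promises no more locks)
 *	...use a and b...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 *
 * ww_acquire_fini asserts that every lock acquired under the context
 * has been released, then poisons wwx_acquired so that reuse of the
 * context is caught by assertions.
 */
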
struct ww_mutex {
	kmutex_t		wwm_lock;
	enum ww_mutex_state {
		WW_UNLOCKED,	/* nobody owns it */
		WW_OWNED,	/* owned by a lwp without a context */
		WW_CTX,		/* owned by a context */
		WW_WANTOWN,	/* owned by ctx, waiters w/o ctx waiting */
	}			wwm_state;
	union {
		struct lwp		*owner;
		struct ww_acquire_ctx	*ctx;
	}			wwm_u;
	struct ww_class		*wwm_class;
	struct rb_tree		wwm_waiters;
	kcondvar_t		wwm_cv;
};

static inline void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
}

static inline void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

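/*
 * Illustrative pairing for a ww_mutex embedded in a hypothetical
 * object: initialize it once against its class, and destroy it only
 * while it is unlocked (ww_mutex_destroy asserts WW_UNLOCKED).
 *
 *	ww_mutex_init(&obj->lock, &example_ww_class);
 *	...
 *	ww_mutex_destroy(&obj->lock);
 */
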
/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
static inline bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	bool locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

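/*
 * Consequently ww_mutex_is_locked is only good for assertions of the
 * form `somebody holds this lock', e.g.
 *
 *	KASSERT(ww_mutex_is_locked(&obj->lock));
 *
 * It cannot distinguish the caller from any other owner.
 */
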
static inline void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static inline int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

static inline void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		 (mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static inline int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

static inline void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ww_mutex_lock_wait(mutex, ctx);
	}
locked:	ctx->wwx_acquired++;
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}

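/*
 * A minimal sketch of the standard backoff protocol built on the
 * return values above, assuming hypothetical objects a and b locked
 * in caller-chosen order.  -EDEADLK means a higher-priority context
 * owns the lock: drop everything held and reacquire the contended
 * lock with ww_mutex_lock_slow, which waits unconditionally.
 * -EALREADY means this context already holds the lock, so it must
 * not be unlocked twice.
 *
 *	struct ww_acquire_ctx ctx;
 *	int error;
 *
 *	ww_acquire_init(&ctx, &example_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);	(the first lock can be wounded too)
 *	error = ww_mutex_lock(&b->lock, &ctx);
 *	if (error == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ww_mutex_lock(&a->lock, &ctx);
 *		(with more than two locks, loop until no -EDEADLK)
 *	}
 *	...
 */
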
static inline int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ret = ww_mutex_lock_wait_sig(mutex, ctx);
		if (ret)
			goto out;
	}
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

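/*
 * The _interruptible variants can additionally fail with -EINTR or
 * -ERESTART (negated errno from cv_wait_sig) when the sleep is
 * interrupted, so callers distinguish backoff from interruption:
 *
 *	error = ww_mutex_lock_interruptible(&b->lock, &ctx);
 *	if (error == -EDEADLK)
 *		(back off, then ww_mutex_lock_slow_interruptible)
 *	else if (error)
 *		(interrupted: unwind held locks and return the error)
 */
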
static inline void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		ret = 1;
	} else {
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
		    (mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}

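/*
 * Note that ww_mutex_trylock takes no acquire context: success leaves
 * the mutex in WW_OWNED, exactly as if it had been locked with
 * ctx == NULL.  It returns 1 on success and 0 on failure, e.g.
 *
 *	if (ww_mutex_trylock(&obj->lock)) {
 *		(brief work on obj)
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */
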
static inline void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

static inline void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

#endif  /* _ASM_WW_MUTEX_H_ */