/*	$NetBSD: ww_mutex.h,v 1.1 2014/07/16 20:59:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Notes on porting:
 *
 * - Locking without a context (ww_mutex_lock(m, NULL)) is supported,
 *   but such a lock degrades to an ordinary sleeping mutex and takes
 *   no part in deadlock avoidance.
 *
 * - Unlike Linux, this version does not pass the acquire context to
 *   trylock and unlock, so unlocking the wrong lock is not detected
 *   here.
 */
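
/*
 * Typical usage, a sketch for orientation only.  The names
 * `deadlock_class', `obj_a', and `obj_b' are hypothetical, and
 * further backoff and error handling are elided.  A caller that gets
 * -EDEADLK releases every lock it holds under the context, then
 * reacquires the contended lock unconditionally with
 * ww_mutex_lock_slow before retrying:
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &deadlock_class);
 *	ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (ww_mutex_lock(&obj_b->lock, &ctx) == -EDEADLK) {
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		ww_mutex_lock(&obj_a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	...
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_acquire_fini(&ctx);
 */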

#ifndef _ASM_WW_MUTEX_H_
#define _ASM_WW_MUTEX_H_

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>
#include <sys/systm.h>

#include <linux/mutex.h>

struct ww_class {
	volatile uint64_t	wwc_ticket;
};

#define	DEFINE_WW_CLASS(CLASS)						      \
	struct ww_class CLASS = {					      \
		.wwc_ticket = 0,					      \
	}

struct ww_acquire_ctx {
	struct ww_class	*wwx_class __diagused;
	uint64_t	wwx_ticket;
	unsigned	wwx_acquired;
	bool		wwx_acquire_done;
	struct rb_node	wwx_rb_node;
};

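/*
 * Acquire contexts waiting for a mutex are kept in a red-black tree
 * sorted by ticket number.  Lower tickets are older and win conflicts,
 * so the tree minimum is the next context the lock should go to.
 */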
static inline int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return 1;
	return 0;
}

static inline int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return 1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

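/*
 * An acquire context represents one deadlock-avoidance transaction.
 * Initializing it draws a fresh ticket from the class; lower tickets
 * win conflicts.  ww_acquire_done marks the end of the locking phase,
 * and ww_acquire_fini retires the context, which must have released
 * all of its locks by then.
 */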
static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

static inline void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	ctx->wwx_acquire_done = true;
}

static inline void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERT(ctx->wwx_acquired == 0);
	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
}

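/*
 * A ww_mutex is in one of four states, all examined and changed only
 * under wwm_lock:
 *
 *	WW_UNLOCKED	nobody holds it
 *	WW_OWNED	held by an lwp with no acquire context
 *	WW_CTX		held on behalf of an acquire context
 *	WW_WANTOWN	held on behalf of an acquire context, with
 *			context-free lockers waiting to own it
 */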
struct ww_mutex {
	kmutex_t		wwm_lock;
	enum ww_mutex_state {
		WW_UNLOCKED,
		WW_OWNED,
		WW_CTX,
		WW_WANTOWN,
	}			wwm_state;
	union {
		struct lwp		*owner;
		struct ww_acquire_ctx	*ctx;
	}			wwm_u;
	struct ww_class		*wwm_class;
	struct rb_tree		wwm_waiters;
	kcondvar_t		wwm_cv;
};

static inline void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
}

static inline void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if the mutex is locked by ANYONE,
 * not whether the caller holds it.  Answering `do I hold this lock?'
 * really requires an acquire context.
 */
static inline bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	bool locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

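/*
 * Sleep until the mutex leaves the given state.  Caller must hold
 * wwm_lock.  The _sig variant may be interrupted by a signal, in
 * which case it returns a negative (Linux-style) errno.
 */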
static inline void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static inline int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

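/*
 * Enter ourselves as a waiter, sorted by ticket, and sleep until the
 * lock has been handed to our context.  Caller must hold wwm_lock,
 * and the mutex must be owned by another context of the same class.
 */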
static inline void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!((mutex->wwm_state == WW_CTX) && (mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static inline int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!((mutex->wwm_state == WW_CTX) && (mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

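/*
 * Lock without an acquire context, like an ordinary sleeping mutex.
 * A context-free owner takes no part in deadlock avoidance; context
 * lockers simply wait for it to go away.
 */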
static inline void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

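/*
 * Lock with deadlock avoidance.  Returns 0 on success, -EALREADY if
 * ctx already holds this mutex, or -EDEADLK if an older (lower-ticket)
 * context holds it, in which case the caller must release every lock
 * it holds under ctx and start over.
 */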
static inline int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ww_mutex_lock_wait(mutex, ctx);
	}
locked:	ctx->wwx_acquired++;
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ret = ww_mutex_lock_wait_sig(mutex, ctx);
		if (ret)
			goto out;
	}
locked:	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

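/*
 * Slow path for a caller that backed off with -EDEADLK: having
 * released everything it held, it may now wait for the contended
 * mutex unconditionally, whatever the owner's ticket.  The
 * _interruptible variant below may additionally fail on a signal.
 */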
static inline void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);
locked:	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;
locked:	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

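/*
 * Try to take the lock without an acquire context, succeeding only
 * if it is completely unlocked.  Returns 1 on success and 0 on
 * failure, following the Linux convention.
 */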
static inline int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		ret = 1;
	} else {
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}

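/*
 * Release the lock.  If any contexts are waiting, hand the lock
 * directly to the one with the lowest ticket; otherwise leave it
 * unlocked and let the wakened waiters race for it.
 */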
static inline void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		mutex->wwm_u.ctx = NULL;
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

#endif  /* _ASM_WW_MUTEX_H_ */