/*	$NetBSD: ww_mutex.h,v 1.4.2.2 2014/08/10 06:55:39 tls Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Notes on porting:
 *
 * - Locking with a null context, ww_mutex_lock(m, NULL), is supported:
 *   it degenerates to a plain owner-based lock that takes no part in
 *   the wait/wound protocol.
 *
 * - Trylock and unlock take no context argument; the mutex itself
 *   records the owning lwp or acquire context, so none is needed to
 *   release it.
 */
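
/*
 * Example of the intended usage, as a sketch only: the names
 * my_ww_class, struct my_obj, and lock_pair below are hypothetical and
 * not part of this header.  Handling of the -EDEADLK back-off is
 * sketched separately after ww_mutex_lock_slow below.
 */
#if 0
DEFINE_WW_CLASS(my_ww_class);

struct my_obj {
	struct ww_mutex	mo_lock;	/* ww_mutex_init(..., &my_ww_class) */
};

static int
lock_pair(struct my_obj *a, struct my_obj *b)
{
	struct ww_acquire_ctx ctx;
	int error;

	ww_acquire_init(&ctx, &my_ww_class);	/* take a ticket */
	error = ww_mutex_lock(&a->mo_lock, &ctx);
	if (error)
		goto fail0;
	error = ww_mutex_lock(&b->mo_lock, &ctx);
	if (error)
		goto fail1;
	ww_acquire_done(&ctx);		/* no more locks under this ctx */

	/* ...use a and b... */

	ww_mutex_unlock(&b->mo_lock);
	ww_mutex_unlock(&a->mo_lock);
	ww_acquire_fini(&ctx);
	return 0;

fail1:	ww_mutex_unlock(&a->mo_lock);
fail0:	ww_acquire_fini(&ctx);
	return error;
}
#endif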

#ifndef _ASM_WW_MUTEX_H_
#define _ASM_WW_MUTEX_H_

#include <sys/rbtree.h>

#include <linux/mutex.h>

struct ww_class {
	volatile uint64_t	wwc_ticket;
};

#define	DEFINE_WW_CLASS(CLASS)						      \
	struct ww_class CLASS = {					      \
		.wwc_ticket = 0,					      \
	}

struct ww_acquire_ctx {
	struct ww_class	*wwx_class __diagused;
	uint64_t	wwx_ticket;
	unsigned	wwx_acquired;
	bool		wwx_acquire_done;
	struct rb_node	wwx_rb_node;
};

/* Waiters are sorted by ticket: lowest (i.e. oldest) ticket first.  */
static inline int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static inline int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

static inline void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	ctx->wwx_acquire_done = true;
}

static inline void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERT(ctx->wwx_acquired == 0);
	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
}

struct ww_mutex {
	kmutex_t		wwm_lock;
	enum ww_mutex_state {
		WW_UNLOCKED,
		WW_OWNED,
		WW_CTX,
		WW_WANTOWN,
	}			wwm_state;
	union {
		struct lwp		*owner;
		struct ww_acquire_ctx	*ctx;
	}			wwm_u;
	struct ww_class		*wwm_class;
	struct rb_tree		wwm_waiters;
	kcondvar_t		wwm_cv;
};

static inline void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
}

static inline void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
static inline bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	int locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}
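
/*
 * Hence ww_mutex_is_locked is only useful for assertions of the form
 * ``somebody holds this lock'', e.g. in a routine that requires its
 * caller to hold the lock.  Sketch only, reusing the hypothetical
 * struct my_obj from the example above:
 */
#if 0
static void
my_obj_update_locked(struct my_obj *obj)
{
	KASSERT(ww_mutex_is_locked(&obj->mo_lock));
	/* ...modify state covered by obj->mo_lock... */
}
#endif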

static inline void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static inline int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

static inline void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!((mutex->wwm_state == WW_CTX) && (mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static inline int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!((mutex->wwm_state == WW_CTX) && (mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

static inline void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ww_mutex_lock_wait(mutex, ctx);
	}
locked:	ctx->wwx_acquired++;
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ret = ww_mutex_lock_wait_sig(mutex, ctx);
		if (ret)
			goto out;
	}
locked:	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);
locked:	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}
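
/*
 * Sketch of the back-off protocol that the -EDEADLK return from
 * ww_mutex_lock is meant to drive (the function lock_two below is a
 * hypothetical caller, not part of this header): on -EDEADLK, drop
 * every lock held under the context, wait for the contended mutex
 * with ww_mutex_lock_slow, and retry the rest.
 */
#if 0
static int
lock_two(struct ww_mutex *m1, struct ww_mutex *m2,
    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *tmp;
	int error;

	error = ww_mutex_lock(m1, ctx);
	if (error == -EDEADLK)
		ww_mutex_lock_slow(m1, ctx);	/* wait our turn for m1 */
	else if (error)
		return error;

	while ((error = ww_mutex_lock(m2, ctx)) == -EDEADLK) {
		/*
		 * m2 is held by an older (higher-priority) context.
		 * Drop m1, wait for m2 on the slow path, and swap the
		 * roles so we do not re-lock the mutex we now hold.
		 */
		ww_mutex_unlock(m1);
		ww_mutex_lock_slow(m2, ctx);
		tmp = m1;
		m1 = m2;
		m2 = tmp;
	}
	if (error) {
		ww_mutex_unlock(m1);
		return error;
	}
	return 0;	/* both locks held under ctx */
}
#endif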

static inline int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERT(!ctx->wwx_acquire_done);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking against myself: %p", curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;
locked:	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		ret = 1;
	} else {
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}

static inline void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_u.ctx->wwx_acquired--;
		mutex->wwm_u.ctx = NULL;
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		/*
		 * Still owned by our acquire context, so drop its
		 * bookkeeping first; then let the non-context lockers
		 * fight over it.
		 */
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_u.ctx->wwx_acquired--;
		mutex->wwm_u.ctx = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

#endif  /* _ASM_WW_MUTEX_H_ */