Home | History | Annotate | Line # | Download | only in linux
linux_ww_mutex.c revision 1.14.4.2
      1  1.14.4.2    martin /*	$NetBSD: linux_ww_mutex.c,v 1.14.4.2 2023/08/01 16:55:03 martin Exp $	*/
      2       1.1  riastrad 
      3       1.1  riastrad /*-
      4       1.1  riastrad  * Copyright (c) 2014 The NetBSD Foundation, Inc.
      5       1.1  riastrad  * All rights reserved.
      6       1.1  riastrad  *
      7       1.1  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8       1.1  riastrad  * by Taylor R. Campbell.
      9       1.1  riastrad  *
     10       1.1  riastrad  * Redistribution and use in source and binary forms, with or without
     11       1.1  riastrad  * modification, are permitted provided that the following conditions
     12       1.1  riastrad  * are met:
     13       1.1  riastrad  * 1. Redistributions of source code must retain the above copyright
     14       1.1  riastrad  *    notice, this list of conditions and the following disclaimer.
     15       1.1  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.1  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17       1.1  riastrad  *    documentation and/or other materials provided with the distribution.
     18       1.1  riastrad  *
     19       1.1  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20       1.1  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21       1.1  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22       1.1  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23       1.1  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24       1.1  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25       1.1  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26       1.1  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27       1.1  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28       1.1  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29       1.1  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30       1.1  riastrad  */
     31       1.1  riastrad 
     32       1.1  riastrad #include <sys/cdefs.h>
     33  1.14.4.2    martin __KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.14.4.2 2023/08/01 16:55:03 martin Exp $");
     34       1.1  riastrad 
     35       1.1  riastrad #include <sys/types.h>
     36       1.1  riastrad #include <sys/atomic.h>
     37       1.1  riastrad #include <sys/condvar.h>
     38       1.2  riastrad #include <sys/lockdebug.h>
     39       1.1  riastrad #include <sys/lwp.h>
     40       1.1  riastrad #include <sys/mutex.h>
     41       1.1  riastrad #include <sys/rbtree.h>
     42       1.1  riastrad 
     43       1.1  riastrad #include <linux/ww_mutex.h>
     44       1.6       mrg #include <linux/errno.h>
     45       1.1  riastrad 
/*
 * LOCKDEBUG hooks: record want/acquire/release events for a ww_mutex
 * under its wwm_debug handle, attributed to our caller's return
 * address.  These compile away when LOCKDEBUG is disabled.
 */
#define	WW_WANTLOCK(WW)							      \
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),			      \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW)							      \
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			      \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW)							      \
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			      \
	    (uintptr_t)__builtin_return_address(0), 0)
     55       1.2  riastrad 
     56       1.1  riastrad static int
     57       1.1  riastrad ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
     58       1.1  riastrad {
     59       1.1  riastrad 	const struct ww_acquire_ctx *const ctx_a = va;
     60       1.1  riastrad 	const struct ww_acquire_ctx *const ctx_b = vb;
     61       1.1  riastrad 
     62       1.1  riastrad 	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
     63       1.1  riastrad 		return -1;
     64       1.1  riastrad 	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
     65  1.14.4.2    martin 		return +1;
     66       1.1  riastrad 	return 0;
     67       1.1  riastrad }
     68       1.1  riastrad 
     69       1.1  riastrad static int
     70       1.1  riastrad ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
     71       1.1  riastrad     const void *vk)
     72       1.1  riastrad {
     73       1.1  riastrad 	const struct ww_acquire_ctx *const ctx = vn;
     74       1.1  riastrad 	const uint64_t *const ticketp = vk, ticket = *ticketp;
     75       1.1  riastrad 
     76       1.1  riastrad 	if (ctx->wwx_ticket < ticket)
     77       1.1  riastrad 		return -1;
     78       1.1  riastrad 	if (ctx->wwx_ticket > ticket)
     79  1.14.4.2    martin 		return +1;
     80       1.1  riastrad 	return 0;
     81       1.1  riastrad }
     82       1.1  riastrad 
/*
 * rb tree ops for the per-mutex set of waiting acquire contexts
 * (wwm_waiters), keyed by ticket number.
 */
static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};
     89       1.1  riastrad 
/*
 * ww_acquire_init(ctx, class)
 *
 *	Initialize ctx in the given class, owned by the calling lwp,
 *	with a fresh ticket number.  Must eventually be finished with
 *	ww_acquire_fini.
 */
void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	/* Ticket order decides who waits and who backs off.  */
	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}
    100       1.1  riastrad 
/*
 * ww_acquire_done(ctx)
 *
 *	Mark ctx as done acquiring new locks; after this,
 *	ww_acquire_done_check refuses to acquire any mutex with ctx
 *	that is not already held by it.
 */
void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}
    110       1.1  riastrad 
/*
 * ww_acquire_done_check(mutex, ctx)
 *
 *	Assert that ctx may still be used to acquire mutex: either
 *	ww_acquire_done has not been called on ctx, or mutex is
 *	already held via ctx.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	Internal subroutine.
 */
static void
ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * If caller has invoked ww_acquire_done, we must already hold
	 * this mutex.
	 */
	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERTMSG((!ctx->wwx_acquire_done ||
		(mutex->wwm_state == WW_CTX && mutex->wwm_u.ctx == ctx)),
	    "ctx %p done acquiring locks, refusing to acquire %p",
	    ctx, mutex);
}
    125      1.13  riastrad 
/*
 * ww_acquire_fini(ctx)
 *
 *	Finish use of ctx, which must hold no more locks, and poison
 *	it so any further use trips an assertion.
 */
void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}
    138       1.1  riastrad 
#ifdef LOCKDEBUG
/*
 * ww_dump(cookie, pr)
 *
 *	LOCKDEBUG dump routine: print the state of the ww_mutex at
 *	cookie via pr, for lock error reports.
 */
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile struct ww_mutex *mutex = cookie;

	pr("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		pr("unlocked\n");
		break;
	case WW_OWNED:
		pr("owned by lwp\n");
		pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_CTX:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_WANTOWN:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		/* WW_WANTOWN implies a contextless waiter exists.  */
		pr("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		pr("unknown\n");
		break;
	}
}

/* LOCKDEBUG operations table for wait/wound mutexes.  */
static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif
    185       1.2  riastrad 
/*
 * ww_mutex_init(mutex, class)
 *
 *	Initialize mutex in the given class.  Must precede any other
 *	ww_mutex_* operations.  After done, mutex must be destroyed
 *	with ww_mutex_destroy.
 */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	/* Waiting acquire contexts, ordered by ticket number.  */
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}
    211       1.1  riastrad 
/*
 * ww_mutex_destroy(mutex)
 *
 *	Destroy mutex initialized by ww_mutex_init.  Caller must not
 *	use mutex with any other ww_mutex_* operations except after
 *	reinitializing with ww_mutex_init.
 */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}
    235       1.1  riastrad 
    236       1.1  riastrad /*
    237      1.10  riastrad  * ww_mutex_is_locked(mutex)
    238      1.10  riastrad  *
    239      1.10  riastrad  *	True if anyone holds mutex locked at the moment, false if not.
    240      1.10  riastrad  *	Answer is stale as soon returned unless mutex is held by
    241      1.10  riastrad  *	caller.
    242      1.10  riastrad  *
    243      1.10  riastrad  *	XXX WARNING: This returns true if it is locked by ANYONE.  Does
    244      1.10  riastrad  *	not mean `Do I hold this lock?' (answering which really
    245      1.10  riastrad  *	requires an acquire context).
    246       1.1  riastrad  */
    247       1.1  riastrad bool
    248       1.1  riastrad ww_mutex_is_locked(struct ww_mutex *mutex)
    249       1.1  riastrad {
    250       1.1  riastrad 	int locked;
    251       1.1  riastrad 
    252       1.1  riastrad 	mutex_enter(&mutex->wwm_lock);
    253       1.1  riastrad 	switch (mutex->wwm_state) {
    254       1.1  riastrad 	case WW_UNLOCKED:
    255       1.1  riastrad 		locked = false;
    256       1.1  riastrad 		break;
    257       1.1  riastrad 	case WW_OWNED:
    258       1.1  riastrad 	case WW_CTX:
    259       1.1  riastrad 	case WW_WANTOWN:
    260       1.1  riastrad 		locked = true;
    261       1.1  riastrad 		break;
    262       1.1  riastrad 	default:
    263       1.1  riastrad 		panic("wait/wound mutex %p in bad state: %d", mutex,
    264       1.1  riastrad 		    (int)mutex->wwm_state);
    265       1.1  riastrad 	}
    266       1.1  riastrad 	mutex_exit(&mutex->wwm_lock);
    267       1.1  riastrad 
    268       1.1  riastrad 	return locked;
    269       1.1  riastrad }
    270       1.1  riastrad 
    271      1.10  riastrad /*
    272      1.10  riastrad  * ww_mutex_state_wait(mutex, state)
    273      1.10  riastrad  *
    274      1.10  riastrad  *	Wait for mutex, which must be in the given state, to transition
    275      1.10  riastrad  *	to another state.  Uninterruptible; never fails.
    276      1.10  riastrad  *
    277      1.10  riastrad  *	Caller must hold mutex's internal lock.
    278      1.10  riastrad  *
    279      1.10  riastrad  *	May sleep.
    280      1.10  riastrad  *
    281      1.10  riastrad  *	Internal subroutine.
    282      1.10  riastrad  */
    283       1.1  riastrad static void
    284       1.1  riastrad ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
    285       1.1  riastrad {
    286       1.1  riastrad 
    287      1.10  riastrad 	KASSERT(mutex_owned(&mutex->wwm_lock));
    288       1.1  riastrad 	KASSERT(mutex->wwm_state == state);
    289  1.14.4.1    martin 
    290  1.14.4.1    martin 	for (;;) {
    291  1.14.4.1    martin 		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
    292  1.14.4.1    martin 		if (mutex->wwm_state != state)
    293  1.14.4.1    martin 			break;
    294  1.14.4.1    martin 	}
    295  1.14.4.1    martin 
    296  1.14.4.1    martin 	KASSERT(mutex->wwm_state != state);
    297       1.1  riastrad }
    298       1.1  riastrad 
/*
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state, or fail if interrupted by a signal.  Return 0
 *	on success, -EINTR if interrupted by a signal.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);

	for (;;) {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		/*
		 * Check the state first: a state change counts as
		 * success even if we were interrupted at the same
		 * time.
		 */
		if (mutex->wwm_state != state) {
			ret = 0;
			break;
		}
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			/* Collapse ERESTART into Linux's -EINTR.  */
			ret = -EINTR;
			break;
		}
	}

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	KASSERTMSG(ret != 0 || mutex->wwm_state != state,
	    "ret=%d mutex=%p mutex->wwm_state=%d state=%d",
	    ret, mutex, mutex->wwm_state, state);
	return ret;
}
    341       1.1  riastrad 
/*
 * ww_mutex_lock_wait(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex.  While waiting, record ctx in the tree of waiters.  Does
 *	not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	/* Record ourselves as a waiter, keyed by ticket number.  */
	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	/* Sleep until mutex's owning context is ours.  */
	for (;;) {
		cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
		if ((mutex->wwm_state == WW_CTX ||
			mutex->wwm_state == WW_WANTOWN) &&
		    mutex->wwm_u.ctx == ctx)
			break;
	}

	/* We own the mutex now; stop advertising ourselves as waiting.  */
	rb_tree_remove_node(&mutex->wwm_waiters, ctx);

	KASSERT(mutex->wwm_state == WW_CTX || mutex->wwm_state == WW_WANTOWN);
	KASSERT(mutex->wwm_u.ctx == ctx);
}
    393       1.1  riastrad 
/*
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex and return 0, or return -EINTR if interrupted by a
 *	signal.  While waiting, record ctx in the tree of waiters.
 *	Does not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	/* Record ourselves as a waiter, keyed by ticket number.  */
	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	for (;;) {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		/*
		 * Check ownership first: acquiring the mutex counts as
		 * success even if a signal arrived at the same time.
		 */
		if ((mutex->wwm_state == WW_CTX ||
			mutex->wwm_state == WW_WANTOWN) &&
		    mutex->wwm_u.ctx == ctx) {
			ret = 0;
			break;
		}
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			/* Collapse ERESTART into Linux's -EINTR.  */
			ret = -EINTR;
			break;
		}
	}

	/* Remove ourselves whether we acquired the mutex or gave up.  */
	rb_tree_remove_node(&mutex->wwm_waiters, ctx);

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	KASSERT(ret != 0 ||
	    mutex->wwm_state == WW_CTX || mutex->wwm_state == WW_WANTOWN);
	KASSERT(ret != 0 || mutex->wwm_u.ctx == ctx);
	return ret;
}
    459       1.1  riastrad 
/*
 * ww_mutex_lock_noctx(mutex)
 *
 *	Acquire mutex without an acquire context.  Caller must not
 *	already hold the mutex.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
 */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Free: take it, owned directly by this lwp.  */
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		/* Signal the context holder that a contextless
		 * waiter wants the lock.  */
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	mutex_exit(&mutex->wwm_lock);
}
    503       1.1  riastrad 
/*
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Acquire mutex without an acquire context and return 0, or fail
 *	and return -EINTR if interrupted by a signal.  Caller must not
 *	already hold the mutex.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing
 *	ww_mutex_lock_interruptible(..., NULL).
 */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Free: take it, owned directly by this lwp.  */
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		/* Signal the context holder that a contextless
		 * waiter wants the lock.  */
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}
    561       1.1  riastrad 
/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible.
 *
 *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
 *	  always succeeds and returns 0.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *
 *	May sleep.
 */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		/* No context: degenerate to an ordinary sleepable lock.  */
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		ret = 0;
		goto out;
	}

	/* Sanity-check the context before touching the mutex.  */
	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Free: claim it for our context immediately.  */
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		/* Held without a context; wait for release, then retry.  */
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		/* Ownership in transition; wait for it to settle, retry.  */
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
	    "ret=%d", ret);
	return ret;
}
    679       1.1  riastrad 
/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible or
 *	interrupted.
 *
 *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
 *	  always succeeds and returns 0.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *	  . Fail with -EINTR if interrupted by a signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		/* No context: plain interruptible acquisition.  */
		WW_WANTLOCK(mutex);
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	/* Sanity-check the context before touching the mutex.  */
	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Free: claim it for our context immediately.  */
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		/* Held without a context; wait for release, then retry.  */
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		/* Ownership in transition; wait for it to settle, retry.  */
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
		ret == -EINTR), "ret=%d", ret);
	return ret;
}
    811       1.1  riastrad 
/*
 * ww_mutex_lock_slow(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again.
 *
 *	Uninterruptible; never fails.
 *
 *	May sleep.
 */
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here.  */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		/* No context: plain uninterruptible acquisition.  */
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	/* Slow path requires the caller to have backed off everything.  */
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Free: claim it for our context immediately.  */
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		/* Held without a context; wait for release, then retry.  */
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		/* Ownership in transition; wait for it to settle, retry.  */
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}
    888       1.1  riastrad 
/*
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again, or fail with -EINTR if interrupted by a
 *	signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	/* Caller must not try to lock against self here.  */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		/* No context: plain interruptible acquisition.  */
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	/* Slow path requires the caller to have backed off everything.  */
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Free: claim it for our context immediately.  */
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		/* Held without a context; wait for release, then retry.  */
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		/* Ownership in transition; wait for it to settle, retry.  */
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}
    982       1.1  riastrad 
/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire mutex and return 1, but if it can't be done
 *	immediately, return 0.
 */
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		/* Free: take plain (context-less) ownership.  */
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		/*
		 * It is tempting to assert that we do not hold the
		 * mutex here, because trylock when we hold the lock
		 * already generally indicates a bug in the design of
		 * the code.  However, it seems that Linux relies on
		 * this deep in ttm buffer reservation logic, so these
		 * assertions are disabled until we find another way to
		 * work around that or fix the bug that leads to it.
		 *
		 * That said: we should not be in the WW_WANTOWN state,
		 * which happens only while we're in the ww mutex logic
		 * waiting to acquire the lock.
		 */
#if 0
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
		    (mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
#endif
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
   1032       1.1  riastrad 
/*
 * ww_mutex_unlock_release(mutex)
 *
 *	Decrement the number of mutexes acquired in the current locking
 *	context of mutex, which must be held by the caller and in
 *	WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
 *	Caller must hold the internal lock of mutex, and is responsible
 *	for notifying waiters.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	/* ~0U marks a finished context; it must not still hold locks.  */
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}
   1060       1.1  riastrad 
/*
 * ww_mutex_unlock(mutex)
 *
 *	Release mutex and wake the next caller waiting, if any.
 */
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	/*
	 * NB: this is the function-like WW_UNLOCKED() bookkeeping
	 * macro, not the WW_UNLOCKED state constant tested just below.
	 */
	WW_UNLOCKED(mutex);
	KASSERTMSG(mutex->wwm_state != WW_UNLOCKED, "mutex %p", mutex);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		/* Reached only if the KASSERTMSG above is compiled out.  */
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	/* Wake all waiters; each re-evaluates the new state itself.  */
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}
   1105       1.8  riastrad 
   1106      1.10  riastrad /*
   1107      1.10  riastrad  * ww_mutex_locking_ctx(mutex)
   1108      1.10  riastrad  *
   1109      1.10  riastrad  *	Return the current acquire context of mutex.  Answer is stale
   1110      1.10  riastrad  *	as soon as returned unless mutex is held by caller.
   1111      1.10  riastrad  */
   1112       1.8  riastrad struct ww_acquire_ctx *
   1113       1.8  riastrad ww_mutex_locking_ctx(struct ww_mutex *mutex)
   1114       1.8  riastrad {
   1115       1.8  riastrad 	struct ww_acquire_ctx *ctx;
   1116       1.8  riastrad 
   1117       1.8  riastrad 	mutex_enter(&mutex->wwm_lock);
   1118       1.8  riastrad 	switch (mutex->wwm_state) {
   1119       1.8  riastrad 	case WW_UNLOCKED:
   1120       1.8  riastrad 	case WW_OWNED:
   1121       1.8  riastrad 		ctx = NULL;
   1122       1.8  riastrad 		break;
   1123       1.8  riastrad 	case WW_CTX:
   1124       1.8  riastrad 	case WW_WANTOWN:
   1125       1.8  riastrad 		ctx = mutex->wwm_u.ctx;
   1126       1.8  riastrad 		break;
   1127       1.8  riastrad 	default:
   1128       1.8  riastrad 		panic("wait/wound mutex %p in bad state: %d",
   1129       1.8  riastrad 		    mutex, (int)mutex->wwm_state);
   1130       1.8  riastrad 	}
   1131       1.8  riastrad 	mutex_exit(&mutex->wwm_lock);
   1132       1.8  riastrad 
   1133       1.8  riastrad 	return ctx;
   1134       1.8  riastrad }
   1135