/*	$NetBSD: linux_ww_mutex.c,v 1.8 2021/12/19 10:38:14 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.8 2021/12/19 10:38:14 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
#include <linux/errno.h>

#define	WW_WANTLOCK(WW)							\
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW)							\
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW)							\
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			\
	    (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}
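
/*
 * Illustrative sketch only, not part of the compiled code: an acquire
 * context brackets one multi-lock transaction.  Assuming a
 * caller-declared class `example_class' and an array `locks[]' of
 * pointers to struct ww_mutex (both hypothetical), the expected
 * lifecycle is init, lock, done, use, unlock, fini:
 *
 *	struct ww_acquire_ctx ctx;
 *	unsigned i;
 *
 *	ww_acquire_init(&ctx, &example_class);
 *	for (i = 0; i < nlocks; i++)
 *		ww_mutex_lock(locks[i], &ctx);
 *	ww_acquire_done(&ctx);
 *	...use the locked objects...
 *	for (i = 0; i < nlocks; i++)
 *		ww_mutex_unlock(locks[i]);
 *	ww_acquire_fini(&ctx);
 *
 * Deadlock backoff is omitted here; see the sketch after ww_mutex_lock
 * below.
 */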

#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile struct ww_mutex *mutex = cookie;

	pr("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		pr("unlocked\n");
		break;
	case WW_OWNED:
		pr("owned by lwp\n");
		pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
		    ? "yes" : "no");
		break;
	case WW_CTX:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
		    ? "yes" : "no");
		break;
	case WW_WANTOWN:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		pr("unknown\n");
		break;
	}
}

static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif

void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}

void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	int locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}
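
/*
 * Illustrative note, not compiled: because ww_mutex_is_locked reports
 * `locked by anyone', it is only good for assertions that the lock is
 * held at all, e.g. (with a hypothetical object `obj'):
 *
 *	KASSERT(ww_mutex_is_locked(&obj->lock));
 *
 * It cannot answer `do I hold this lock?'; that requires comparing
 * acquire contexts, e.g. via ww_mutex_locking_ctx at the end of this
 * file.
 */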

static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	mutex_exit(&mutex->wwm_lock);
}

static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
	return 0;
}
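
/*
 * Illustrative sketch only, not compiled: the backoff protocol callers
 * are expected to follow with ww_mutex_lock's return values.  On
 * -EDEADLK the caller has been wounded by an older context: it must
 * release everything held under its context, wait for the winner by
 * taking the contended lock with ww_mutex_lock_slow, and then retry.
 * -EALREADY means the context already holds that particular mutex and
 * can simply move on.  The names `locks', `nlocks', and
 * `example_class' are hypothetical.
 *
 *	struct ww_acquire_ctx ctx;
 *	struct ww_mutex *contended = NULL;
 *	unsigned i, j;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &example_class);
 * retry:
 *	for (i = 0; i < nlocks; i++) {
 *		if (locks[i] == contended) {
 *			contended = NULL;
 *			continue;
 *		}
 *		ret = ww_mutex_lock(locks[i], &ctx);
 *		if (ret == -EALREADY)
 *			continue;
 *		if (ret == -EDEADLK) {
 *			for (j = 0; j < i; j++)
 *				ww_mutex_unlock(locks[j]);
 *			if (contended != NULL)
 *				ww_mutex_unlock(contended);
 *			contended = locks[i];
 *			ww_mutex_lock_slow(contended, &ctx);
 *			goto retry;
 *		}
 *	}
 *	ww_acquire_done(&ctx);
 */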

int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		return ww_mutex_lock_noctx_sig(mutex);
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here. */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
		    (mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
		    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
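
/*
 * Illustrative note, not compiled: ww_mutex_trylock returns nonzero on
 * success and 0 on failure, and takes the lock without an acquire
 * context, so a successful trylock pairs with a plain unlock (the
 * object name is hypothetical):
 *
 *	if (ww_mutex_trylock(&obj->lock)) {
 *		...fast path...
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */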

static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it. */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it. */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	WW_UNLOCKED(mutex);
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

struct ww_acquire_ctx *
ww_mutex_locking_ctx(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
	case WW_OWNED:
		ctx = NULL;
		break;
	case WW_CTX:
	case WW_WANTOWN:
		ctx = mutex->wwm_u.ctx;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return ctx;
}
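
/*
 * Illustrative note, not compiled: ww_mutex_locking_ctx returns the
 * acquire context holding the mutex, or NULL if the mutex is unlocked
 * or held without a context, so callers can test for ownership by a
 * particular context (names hypothetical):
 *
 *	if (ww_mutex_locking_ctx(&obj->lock) == &ctx)
 *		...already locked under ctx...
 */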