/* $NetBSD: linux_ww_mutex.c,v 1.4 2017/09/16 23:56:42 christos Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.4 2017/09/16 23:56:42 christos Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>

#define WW_WANTLOCK(WW) \
        LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW), \
            (uintptr_t)__builtin_return_address(0), 0)
#define WW_LOCKED(WW) \
        LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL, \
            (uintptr_t)__builtin_return_address(0), 0)
#define WW_UNLOCKED(WW) \
        LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW), \
            (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
        const struct ww_acquire_ctx *const ctx_a = va;
        const struct ww_acquire_ctx *const ctx_b = vb;

        if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
                return -1;
        if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
                return +1;
        return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
        const struct ww_acquire_ctx *const ctx = vn;
        const uint64_t *const ticketp = vk, ticket = *ticketp;

        if (ctx->wwx_ticket < ticket)
                return -1;
        if (ctx->wwx_ticket > ticket)
                return +1;
        return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
        .rbto_compare_nodes = &ww_acquire_ctx_compare,
        .rbto_compare_key = &ww_acquire_ctx_compare_key,
        .rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
        .rbto_context = NULL,
};
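
/*
 * Waiting contexts are queued in a red-black tree keyed on ticket
 * number, so RB_TREE_MIN in ww_mutex_unlock yields the waiter with the
 * lowest ticket, i.e. the oldest and hence highest-priority context
 * under the wait/wound scheme.
 */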

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

        ctx->wwx_class = class;
        ctx->wwx_owner = curlwp;
        ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
        ctx->wwx_acquired = 0;
        ctx->wwx_acquire_done = false;
}
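
/*
 * Illustrative usage sketch, not part of the original source; the
 * names my_class, res_a, res_b, and their ->lock members are
 * hypothetical, and error handling is elided (see the backoff sketch
 * after ww_mutex_lock below).  A caller initializes a context, locks
 * any number of mutexes in the class, marks the acquire phase done,
 * and tears everything down afterward:
 *
 *      struct ww_acquire_ctx ctx;
 *
 *      ww_acquire_init(&ctx, &my_class);
 *      ww_mutex_lock(&res_a->lock, &ctx);
 *      ww_mutex_lock(&res_b->lock, &ctx);
 *      ww_acquire_done(&ctx);
 *      ...use res_a and res_b...
 *      ww_mutex_unlock(&res_b->lock);
 *      ww_mutex_unlock(&res_a->lock);
 *      ww_acquire_fini(&ctx);
 */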

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

        ctx->wwx_acquire_done = true;
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
            ctx, ctx->wwx_acquired);

        ctx->wwx_acquired = ~0U;        /* Fail if called again. */
        ctx->wwx_owner = NULL;
}

#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie)
{
        const volatile struct ww_mutex *mutex = cookie;

        printf_nolog("%-13s: ", "state");
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                printf_nolog("unlocked\n");
                break;
        case WW_OWNED:
                printf_nolog("owned by lwp\n");
                printf_nolog("%-13s: %p\n", "owner", mutex->wwm_u.owner);
                printf_nolog("%-13s: %s\n", "waiters",
                    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
                    ? "yes" : "no");
                break;
        case WW_CTX:
                printf_nolog("owned via ctx\n");
                printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
                printf_nolog("%-13s: %p\n", "lwp",
                    mutex->wwm_u.ctx->wwx_owner);
                printf_nolog("%-13s: %s\n", "waiters",
                    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
                    ? "yes" : "no");
                break;
        case WW_WANTOWN:
                printf_nolog("owned via ctx\n");
                printf_nolog("%-13s: %p\n", "context", mutex->wwm_u.ctx);
                printf_nolog("%-13s: %p\n", "lwp",
                    mutex->wwm_u.ctx->wwx_owner);
                printf_nolog("%-13s: %s\n", "waiters", "yes (noctx)");
                break;
        default:
                printf_nolog("unknown\n");
                break;
        }
}

static lockops_t ww_lockops = {
        .lo_name = "Wait/wound mutex",
        .lo_type = LOCKOPS_SLEEP,
        .lo_dump = ww_dump,
};
#endif

void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

        /*
         * XXX Apparently Linux takes these with spin locks held.  That
         * strikes me as a bad idea, but so it is...
         */
        mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
        mutex->wwm_state = WW_UNLOCKED;
        mutex->wwm_class = class;
        rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
        cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
        mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
            (uintptr_t)__builtin_return_address(0));
#endif
}

void
ww_mutex_destroy(struct ww_mutex *mutex)
{

        KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
        LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
        cv_destroy(&mutex->wwm_cv);
#if 0
        rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
        KASSERT(mutex->wwm_state == WW_UNLOCKED);
        mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
        int locked;

        mutex_enter(&mutex->wwm_lock);
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                locked = false;
                break;
        case WW_OWNED:
        case WW_CTX:
        case WW_WANTOWN:
                locked = true;
                break;
        default:
                panic("wait/wound mutex %p in bad state: %d", mutex,
                    (int)mutex->wwm_state);
        }
        mutex_exit(&mutex->wwm_lock);

        return locked;
}

static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

        KASSERT(mutex->wwm_state == state);
        do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
        while (mutex->wwm_state == state);
}

static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
        int ret;

        KASSERT(mutex->wwm_state == state);
        do {
                /* XXX errno NetBSD->Linux */
                ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
                if (ret)
                        break;
        } while (mutex->wwm_state == state);

        return ret;
}
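
/*
 * Note on the errno conversion above: cv_wait_sig returns zero on
 * success and a positive NetBSD errno (EINTR or ERESTART) when the
 * wait is interrupted by a signal, so negating its result
 * approximates the Linux convention of negative error returns.
 */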

static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        struct ww_acquire_ctx *collision __diagused;

        KASSERT(mutex_owned(&mutex->wwm_lock));

        KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx != ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
            "ww mutex class mismatch: %p != %p",
            ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx,
            mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

        collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
        KASSERTMSG((collision == ctx),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

        do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
        while (!(((mutex->wwm_state == WW_CTX) ||
                    (mutex->wwm_state == WW_WANTOWN)) &&
                (mutex->wwm_u.ctx == ctx)));

        rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        struct ww_acquire_ctx *collision __diagused;
        int ret;

        KASSERT(mutex_owned(&mutex->wwm_lock));

        KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx != ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
            "ww mutex class mismatch: %p != %p",
            ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx,
            mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

        collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
        KASSERTMSG((collision == ctx),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

        do {
                /* XXX errno NetBSD->Linux */
                ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
                if (ret)
                        goto out;
        } while (!(((mutex->wwm_state == WW_CTX) ||
                    (mutex->wwm_state == WW_WANTOWN)) &&
                (mutex->wwm_u.ctx == ctx)));

out:    rb_tree_remove_node(&mutex->wwm_waiters, ctx);
        return ret;
}

static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_OWNED;
                mutex->wwm_u.owner = curlwp;
                break;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_OWNED);
                goto retry;
        case WW_CTX:
                KASSERT(mutex->wwm_u.ctx != NULL);
                mutex->wwm_state = WW_WANTOWN;
                /* FALLTHROUGH */
        case WW_WANTOWN:
                KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_WANTOWN);
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }
        KASSERT(mutex->wwm_state == WW_OWNED);
        KASSERT(mutex->wwm_u.owner == curlwp);
        WW_LOCKED(mutex);
        mutex_exit(&mutex->wwm_lock);
}

static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
        int ret;

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_OWNED;
                mutex->wwm_u.owner = curlwp;
                break;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
                if (ret)
                        goto out;
                goto retry;
        case WW_CTX:
                KASSERT(mutex->wwm_u.ctx != NULL);
                mutex->wwm_state = WW_WANTOWN;
                /* FALLTHROUGH */
        case WW_WANTOWN:
                KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
                if (ret)
                        goto out;
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }
        KASSERT(mutex->wwm_state == WW_OWNED);
        KASSERT(mutex->wwm_u.owner == curlwp);
        WW_LOCKED(mutex);
        ret = 0;
out:    mutex_exit(&mutex->wwm_lock);
        return ret;
}

int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

        /*
         * We do not WW_WANTLOCK at the beginning because we may
         * correctly already hold it, if we have a context, in which
         * case we must return EALREADY to the caller.
         */
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                WW_WANTLOCK(mutex);
                ww_mutex_lock_noctx(mutex);
                return 0;
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG(!ctx->wwx_acquire_done,
            "ctx %p done acquiring locks, can't acquire more", ctx);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                WW_WANTLOCK(mutex);
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                WW_WANTLOCK(mutex);
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_OWNED);
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ww_mutex_state_wait(mutex, WW_WANTOWN);
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERT((mutex->wwm_u.ctx == ctx) ||
            (mutex->wwm_u.ctx->wwx_owner != curlwp));

        if (mutex->wwm_u.ctx == ctx) {
                /*
                 * We already own it.  Yes, this can happen correctly
                 * for objects whose locking order is determined by
                 * userland.
                 */
                mutex_exit(&mutex->wwm_lock);
                return -EALREADY;
        }

        /*
         * We do not own it.  We can safely assert to LOCKDEBUG that we
         * want it.
         */
        WW_WANTLOCK(mutex);

        if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
                /*
                 * Owned by a higher-priority party.  Tell the caller
                 * to unlock everything and start over.
                 */
                KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
                    "ww mutex class mismatch: %p != %p",
                    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
                mutex_exit(&mutex->wwm_lock);
                return -EDEADLK;
        }

        /*
         * Owned by a lower-priority party.  Ask that party to wake us
         * when it is done or it realizes it needs to back off.
         */
        ww_mutex_lock_wait(mutex, ctx);

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        mutex_exit(&mutex->wwm_lock);
        return 0;
}
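
/*
 * Illustrative deadlock-backoff sketch, not part of the original
 * source; "a" and "b" are hypothetical resources with embedded
 * ww_mutexes.  On -EDEADLK the caller must drop every lock it holds,
 * then reacquire the contended mutex with ww_mutex_lock_slow (legal
 * only while the context holds no locks, per the KASSERT there)
 * before retrying the rest:
 *
 *      ww_mutex_lock(&a->lock, &ctx);
 *      if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *              ww_mutex_unlock(&a->lock);
 *              ww_mutex_lock_slow(&b->lock, &ctx);
 *              ww_mutex_lock(&a->lock, &ctx);
 *      }
 *
 * A complete caller repeats this backoff in a loop, since any later
 * lock attempt can itself return -EDEADLK or -EALREADY.
 */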

int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        int ret;

        /*
         * We do not WW_WANTLOCK at the beginning because we may
         * correctly already hold it, if we have a context, in which
         * case we must return EALREADY to the caller.
         */
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                WW_WANTLOCK(mutex);
                return ww_mutex_lock_noctx_sig(mutex);
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG(!ctx->wwx_acquire_done,
            "ctx %p done acquiring locks, can't acquire more", ctx);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                WW_WANTLOCK(mutex);
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                WW_WANTLOCK(mutex);
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
                if (ret)
                        goto out;
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
                if (ret)
                        goto out;
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERT((mutex->wwm_u.ctx == ctx) ||
            (mutex->wwm_u.ctx->wwx_owner != curlwp));

        if (mutex->wwm_u.ctx == ctx) {
                /*
                 * We already own it.  Yes, this can happen correctly
                 * for objects whose locking order is determined by
                 * userland.
                 */
                mutex_exit(&mutex->wwm_lock);
                return -EALREADY;
        }

        /*
         * We do not own it.  We can safely assert to LOCKDEBUG that we
         * want it.
         */
        WW_WANTLOCK(mutex);

        if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
                /*
                 * Owned by a higher-priority party.  Tell the caller
                 * to unlock everything and start over.
                 */
                KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
                    "ww mutex class mismatch: %p != %p",
                    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
                mutex_exit(&mutex->wwm_lock);
                return -EDEADLK;
        }

        /*
         * Owned by a lower-priority party.  Ask that party to wake us
         * when it is done or it realizes it needs to back off.
         */
        ret = ww_mutex_lock_wait_sig(mutex, ctx);
        if (ret)
                goto out;

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        ret = 0;
out:    mutex_exit(&mutex->wwm_lock);
        return ret;
}

void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

        /* Caller must not try to lock against self here. */
        WW_WANTLOCK(mutex);
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                ww_mutex_lock_noctx(mutex);
                return;
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG(!ctx->wwx_acquire_done,
            "ctx %p done acquiring locks, can't acquire more", ctx);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_acquired == 0),
            "ctx %p still holds %u locks, not allowed in slow path",
            ctx, ctx->wwx_acquired);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_OWNED);
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ww_mutex_state_wait(mutex, WW_WANTOWN);
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
            "locking %p against myself: %p", mutex, curlwp);

        /*
         * Owned by another party, of any priority.  Ask that party to
         * wake us when it's done.
         */
        ww_mutex_lock_wait(mutex, ctx);

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        mutex_exit(&mutex->wwm_lock);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
        int ret;

        WW_WANTLOCK(mutex);
        ASSERT_SLEEPABLE();

        if (ctx == NULL)
                return ww_mutex_lock_noctx_sig(mutex);

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG(!ctx->wwx_acquire_done,
            "ctx %p done acquiring locks, can't acquire more", ctx);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_acquired == 0),
            "ctx %p still holds %u locks, not allowed in slow path",
            ctx, ctx->wwx_acquired);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
                if (ret)
                        goto out;
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
                if (ret)
                        goto out;
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
            "locking %p against myself: %p", mutex, curlwp);

        /*
         * Owned by another party, of any priority.  Ask that party to
         * wake us when it's done.
         */
        ret = ww_mutex_lock_wait_sig(mutex, ctx);
        if (ret)
                goto out;

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        ret = 0;
out:    mutex_exit(&mutex->wwm_lock);
        return ret;
}

int
ww_mutex_trylock(struct ww_mutex *mutex)
{
        int ret;

        mutex_enter(&mutex->wwm_lock);
        if (mutex->wwm_state == WW_UNLOCKED) {
                mutex->wwm_state = WW_OWNED;
                mutex->wwm_u.owner = curlwp;
                WW_WANTLOCK(mutex);
                WW_LOCKED(mutex);
                ret = 1;
        } else {
                KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
                    (mutex->wwm_u.owner != curlwp)),
                    "locking %p against myself: %p", mutex, curlwp);
                KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
                    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
                    "locking %p against myself: %p", mutex, curlwp);
                KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
                    (mutex->wwm_u.ctx->wwx_owner != curlwp)),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = 0;
        }
        mutex_exit(&mutex->wwm_lock);

        return ret;
}
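
/*
 * Note: ww_mutex_trylock returns 1 on success and 0 on failure,
 * following the Linux convention of a boolean result rather than an
 * errno-style return, so a typical caller looks like:
 *
 *      if (ww_mutex_trylock(&obj->lock)) {
 *              ...use obj...
 *              ww_mutex_unlock(&obj->lock);
 *      }
 *
 * ("obj" above is a hypothetical structure with an embedded ww_mutex.)
 */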

static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

        KASSERT(mutex_owned(&mutex->wwm_lock));
        KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
            "ww_mutex %p ctx %p held by %p, not by self (%p)",
            mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
            curlwp);
        KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
        mutex->wwm_u.ctx->wwx_acquired--;
        mutex->wwm_u.ctx = NULL;
}

void
ww_mutex_unlock(struct ww_mutex *mutex)
{
        struct ww_acquire_ctx *ctx;

        mutex_enter(&mutex->wwm_lock);
        KASSERT(mutex->wwm_state != WW_UNLOCKED);
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                panic("unlocking unlocked wait/wound mutex: %p", mutex);
        case WW_OWNED:
                /* Let the context lockers fight over it. */
                mutex->wwm_u.owner = NULL;
                mutex->wwm_state = WW_UNLOCKED;
                break;
        case WW_CTX:
                ww_mutex_unlock_release(mutex);
                /*
                 * If there are any waiters with contexts, grant the
                 * lock to the highest-priority one.  Otherwise, just
                 * unlock it.
                 */
                if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
                        mutex->wwm_state = WW_CTX;
                        mutex->wwm_u.ctx = ctx;
                } else {
                        mutex->wwm_state = WW_UNLOCKED;
                }
                break;
        case WW_WANTOWN:
                ww_mutex_unlock_release(mutex);
                /* Let the non-context lockers fight over it. */
                mutex->wwm_state = WW_UNLOCKED;
                break;
        }
        WW_UNLOCKED(mutex);
        cv_broadcast(&mutex->wwm_cv);
        mutex_exit(&mutex->wwm_lock);
}