/* $NetBSD: linux_ww_mutex.c,v 1.11 2021/12/24 15:22:20 riastradh Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.11 2021/12/24 15:22:20 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
#include <linux/errno.h>

#define WW_WANTLOCK(WW)						\
        LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),		\
            (uintptr_t)__builtin_return_address(0), 0)
#define WW_LOCKED(WW)						\
        LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,		\
            (uintptr_t)__builtin_return_address(0), 0)
#define WW_UNLOCKED(WW)						\
        LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),		\
            (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
        const struct ww_acquire_ctx *const ctx_a = va;
        const struct ww_acquire_ctx *const ctx_b = vb;

        if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
                return -1;
        if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
                return +1;
        return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
        const struct ww_acquire_ctx *const ctx = vn;
        const uint64_t *const ticketp = vk, ticket = *ticketp;

        if (ctx->wwx_ticket < ticket)
                return -1;
        if (ctx->wwx_ticket > ticket)
                return +1;
        return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
        .rbto_compare_nodes = &ww_acquire_ctx_compare,
        .rbto_compare_key = &ww_acquire_ctx_compare_key,
        .rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
        .rbto_context = NULL,
};

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

        ctx->wwx_class = class;
        ctx->wwx_owner = curlwp;
        ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
        ctx->wwx_acquired = 0;
        ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

        ctx->wwx_acquire_done = true;
}

static void
ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

        /*
         * If caller has invoked ww_acquire_done, we must already hold
         * this mutex.
         */
        KASSERT(mutex_owned(&mutex->wwm_lock));
        KASSERTMSG((!ctx->wwx_acquire_done ||
                (mutex->wwm_state == WW_CTX && mutex->wwm_u.ctx == ctx)),
            "ctx %p done acquiring locks, refusing to acquire %p",
            ctx, mutex);
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
            ctx, ctx->wwx_acquired);

        ctx->wwx_acquired = ~0U;	/* Fail if called again. */
        ctx->wwx_owner = NULL;
}
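
/*
 * Example (illustrative sketch only, not part of this file): the usual
 * lifecycle of an acquire context.  The objects `res_a' and `res_b',
 * their embedded ww_mutex `lock', and the class `res_class' are
 * hypothetical names; error handling for ww_mutex_lock is omitted here
 * and sketched after ww_mutex_lock_slow below.
 *
 *        struct ww_acquire_ctx ctx;
 *
 *        ww_acquire_init(&ctx, &res_class);
 *        ww_mutex_lock(&res_a->lock, &ctx);
 *        ww_mutex_lock(&res_b->lock, &ctx);
 *        ww_acquire_done(&ctx);		(no further locks will be taken)
 *
 *        (use res_a and res_b)
 *
 *        ww_mutex_unlock(&res_b->lock);
 *        ww_mutex_unlock(&res_a->lock);
 *        ww_acquire_fini(&ctx);
 */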

#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
        const volatile struct ww_mutex *mutex = cookie;

        pr("%-13s: ", "state");
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                pr("unlocked\n");
                break;
        case WW_OWNED:
                pr("owned by lwp\n");
                pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
                pr("%-13s: %s\n", "waiters",
                    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
                        ? "yes" : "no");
                break;
        case WW_CTX:
                pr("owned via ctx\n");
                pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
                pr("%-13s: %p\n", "lwp",
                    mutex->wwm_u.ctx->wwx_owner);
                pr("%-13s: %s\n", "waiters",
                    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
                        ? "yes" : "no");
                break;
        case WW_WANTOWN:
                pr("owned via ctx\n");
                pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
                pr("%-13s: %p\n", "lwp",
                    mutex->wwm_u.ctx->wwx_owner);
                pr("%-13s: %s\n", "waiters", "yes (noctx)");
                break;
        default:
                pr("unknown\n");
                break;
        }
}

static lockops_t ww_lockops = {
        .lo_name = "Wait/wound mutex",
        .lo_type = LOCKOPS_SLEEP,
        .lo_dump = ww_dump,
};
#endif

/*
 * ww_mutex_init(mutex, class)
 *
 *	Initialize mutex in the given class. Must precede any other
 *	ww_mutex_* operations. After done, mutex must be destroyed
 *	with ww_mutex_destroy.
 */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

        /*
         * XXX Apparently Linux takes these with spin locks held. That
         * strikes me as a bad idea, but so it is...
         */
        mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
        mutex->wwm_state = WW_UNLOCKED;
        mutex->wwm_class = class;
        rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
        cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
        mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
            (uintptr_t)__builtin_return_address(0));
#endif
}

/*
 * ww_mutex_destroy(mutex)
 *
 *	Destroy mutex initialized by ww_mutex_init. Caller must not
 *	use mutex with any other ww_mutex_* operations afterward,
 *	except after reinitializing it with ww_mutex_init.
 */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{

        KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
        LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
        cv_destroy(&mutex->wwm_cv);
#if 0
        rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
        KASSERT(mutex->wwm_state == WW_UNLOCKED);
        mutex_destroy(&mutex->wwm_lock);
}
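
/*
 * Example (illustrative sketch; `struct res' and `res_class' are
 * hypothetical): a ww_mutex is normally embedded in the object it
 * protects and initialized once before the object is published, then
 * destroyed, unlocked, when the object is torn down.
 *
 *        struct res {
 *                struct ww_mutex lock;
 *                (other fields)
 *        };
 *
 *        ww_mutex_init(&res->lock, &res_class);
 *        (lifetime of res)
 *        ww_mutex_destroy(&res->lock);
 */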

/*
 * ww_mutex_is_locked(mutex)
 *
 *	True if anyone holds mutex locked at the moment, false if not.
 *	Answer is stale as soon as it is returned unless mutex is held
 *	by caller.
 *
 *	XXX WARNING: This returns true if it is locked by ANYONE. Does
 *	not mean `Do I hold this lock?' (answering which really
 *	requires an acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
        int locked;

        mutex_enter(&mutex->wwm_lock);
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                locked = false;
                break;
        case WW_OWNED:
        case WW_CTX:
        case WW_WANTOWN:
                locked = true;
                break;
        default:
                panic("wait/wound mutex %p in bad state: %d", mutex,
                    (int)mutex->wwm_state);
        }
        mutex_exit(&mutex->wwm_lock);

        return locked;
}
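
/*
 * Example (illustrative sketch; `res' is hypothetical): because of the
 * warning above, ww_mutex_is_locked is suitable only for assertions of
 * the form below, not for `do I hold this?' checks -- for those,
 * compare ww_mutex_locking_ctx(mutex) against the caller's own acquire
 * context.
 *
 *        KASSERT(ww_mutex_is_locked(&res->lock));
 */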

/*
 * ww_mutex_state_wait(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state. Uninterruptible; never fails.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

        KASSERT(mutex_owned(&mutex->wwm_lock));
        KASSERT(mutex->wwm_state == state);
        do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
        while (mutex->wwm_state == state);
}

/*
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state, or fail if interrupted by a signal. Return 0
 *	on success, -EINTR if interrupted by a signal.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
        int ret;

        KASSERT(mutex_owned(&mutex->wwm_lock));
        KASSERT(mutex->wwm_state == state);
        do {
                /* XXX errno NetBSD->Linux */
                ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
                if (ret) {
                        KASSERTMSG((ret == -EINTR || ret == -ERESTART),
                            "ret=%d", ret);
                        ret = -EINTR;
                        break;
                }
        } while (mutex->wwm_state == state);

        KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
        return ret;
}

/*
 * ww_mutex_lock_wait(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex. While waiting, record ctx in the tree of waiters. Does
 *	not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex. Caller must hold mutex's
 *	internal lock. Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        struct ww_acquire_ctx *collision __diagused;

        KASSERT(mutex_owned(&mutex->wwm_lock));

        KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx != ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
            "ww mutex class mismatch: %p != %p",
            ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx,
            mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

        collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
        KASSERTMSG((collision == ctx),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

        do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
        while (!(((mutex->wwm_state == WW_CTX) ||
                    (mutex->wwm_state == WW_WANTOWN)) &&
                (mutex->wwm_u.ctx == ctx)));

        rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

/*
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex and return 0, or return -EINTR if interrupted by a
 *	signal. While waiting, record ctx in the tree of waiters.
 *	Does not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex. Caller must hold mutex's
 *	internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        struct ww_acquire_ctx *collision __diagused;
        int ret;

        KASSERT(mutex_owned(&mutex->wwm_lock));

        KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx != ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
            "ww mutex class mismatch: %p != %p",
            ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx,
            mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

        collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
        KASSERTMSG((collision == ctx),
            "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
            ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

        do {
                /* XXX errno NetBSD->Linux */
                ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
                if (ret) {
                        KASSERTMSG((ret == -EINTR || ret == -ERESTART),
                            "ret=%d", ret);
                        ret = -EINTR;
                        goto out;
                }
        } while (!(((mutex->wwm_state == WW_CTX) ||
                    (mutex->wwm_state == WW_WANTOWN)) &&
                (mutex->wwm_u.ctx == ctx)));

out:    rb_tree_remove_node(&mutex->wwm_waiters, ctx);
        KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
        return ret;
}

/*
 * ww_mutex_lock_noctx(mutex)
 *
 *	Acquire mutex without an acquire context. Caller must not
 *	already hold the mutex. Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
 */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_OWNED;
                mutex->wwm_u.owner = curlwp;
                break;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_OWNED);
                goto retry;
        case WW_CTX:
                KASSERT(mutex->wwm_u.ctx != NULL);
                mutex->wwm_state = WW_WANTOWN;
                /* FALLTHROUGH */
        case WW_WANTOWN:
                KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_WANTOWN);
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }
        KASSERT(mutex->wwm_state == WW_OWNED);
        KASSERT(mutex->wwm_u.owner == curlwp);
        WW_LOCKED(mutex);
        mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Acquire mutex without an acquire context and return 0, or fail
 *	and return -EINTR if interrupted by a signal. Caller must not
 *	already hold the mutex.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing
 *	ww_mutex_lock_interruptible(..., NULL).
 */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
        int ret;

        mutex_enter(&mutex->wwm_lock);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_OWNED;
                mutex->wwm_u.owner = curlwp;
                break;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
                if (ret) {
                        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                        goto out;
                }
                goto retry;
        case WW_CTX:
                KASSERT(mutex->wwm_u.ctx != NULL);
                mutex->wwm_state = WW_WANTOWN;
                /* FALLTHROUGH */
        case WW_WANTOWN:
                KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
                if (ret) {
                        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                        goto out;
                }
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }
        KASSERT(mutex->wwm_state == WW_OWNED);
        KASSERT(mutex->wwm_u.owner == curlwp);
        WW_LOCKED(mutex);
        ret = 0;
out:    mutex_exit(&mutex->wwm_lock);
        KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
        return ret;
}

/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible.
 *
 *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
 *	  always succeeds and returns 0.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *
 *	May sleep.
 */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        int ret;

        /*
         * We do not WW_WANTLOCK at the beginning because we may
         * correctly already hold it, if we have a context, in which
         * case we must return EALREADY to the caller.
         */
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                WW_WANTLOCK(mutex);
                ww_mutex_lock_noctx(mutex);
                ret = 0;
                goto out;
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
        ww_acquire_done_check(mutex, ctx);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                WW_WANTLOCK(mutex);
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                WW_WANTLOCK(mutex);
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_OWNED);
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ww_mutex_state_wait(mutex, WW_WANTOWN);
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERT((mutex->wwm_u.ctx == ctx) ||
            (mutex->wwm_u.ctx->wwx_owner != curlwp));

        if (mutex->wwm_u.ctx == ctx) {
                /*
                 * We already own it. Yes, this can happen correctly
                 * for objects whose locking order is determined by
                 * userland.
                 */
                ret = -EALREADY;
                goto out_unlock;
        }

        /*
         * We do not own it. We can safely assert to LOCKDEBUG that we
         * want it.
         */
        WW_WANTLOCK(mutex);

        if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
                /*
                 * Owned by a higher-priority party. Tell the caller
                 * to unlock everything and start over.
                 */
                KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
                    "ww mutex class mismatch: %p != %p",
                    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
                ret = -EDEADLK;
                goto out_unlock;
        }

        /*
         * Owned by a lower-priority party. Ask that party to wake us
         * when it is done or it realizes it needs to back off.
         */
        ww_mutex_lock_wait(mutex, ctx);

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        ret = 0;
out_unlock:
        mutex_exit(&mutex->wwm_lock);
out:    KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
            "ret=%d", ret);
        return ret;
}

/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible or
 *	interrupted.
 *
 *	- If ctx is null, caller must not hold mutex; succeeds and
 *	  returns 0 unless interrupted by a signal, in which case it
 *	  fails with -EINTR.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *	  . Fail with -EINTR if interrupted by a signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
        int ret;

        /*
         * We do not WW_WANTLOCK at the beginning because we may
         * correctly already hold it, if we have a context, in which
         * case we must return EALREADY to the caller.
         */
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                WW_WANTLOCK(mutex);
                ret = ww_mutex_lock_noctx_sig(mutex);
                KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
                goto out;
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
        ww_acquire_done_check(mutex, ctx);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                WW_WANTLOCK(mutex);
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                WW_WANTLOCK(mutex);
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
                if (ret) {
                        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                        goto out_unlock;
                }
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
                if (ret) {
                        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                        goto out_unlock;
                }
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERT((mutex->wwm_u.ctx == ctx) ||
            (mutex->wwm_u.ctx->wwx_owner != curlwp));

        if (mutex->wwm_u.ctx == ctx) {
                /*
                 * We already own it. Yes, this can happen correctly
                 * for objects whose locking order is determined by
                 * userland.
                 */
                ret = -EALREADY;
                goto out_unlock;
        }

        /*
         * We do not own it. We can safely assert to LOCKDEBUG that we
         * want it.
         */
        WW_WANTLOCK(mutex);

        if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
                /*
                 * Owned by a higher-priority party. Tell the caller
                 * to unlock everything and start over.
                 */
                KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
                    "ww mutex class mismatch: %p != %p",
                    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
                ret = -EDEADLK;
                goto out_unlock;
        }

        /*
         * Owned by a lower-priority party. Ask that party to wake us
         * when it is done or it realizes it needs to back off.
         */
        ret = ww_mutex_lock_wait_sig(mutex, ctx);
        if (ret) {
                KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                goto out_unlock;
        }

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        ret = 0;
out_unlock:
        mutex_exit(&mutex->wwm_lock);
out:    KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
                ret == -EINTR), "ret=%d", ret);
        return ret;
}

/*
 * ww_mutex_lock_slow(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again.
 *
 *	Uninterruptible; never fails.
 *
 *	May sleep.
 */
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

        /* Caller must not try to lock against self here. */
        WW_WANTLOCK(mutex);
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                ww_mutex_lock_noctx(mutex);
                return;
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_acquired == 0),
            "ctx %p still holds %u locks, not allowed in slow path",
            ctx, ctx->wwx_acquired);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
        ww_acquire_done_check(mutex, ctx);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ww_mutex_state_wait(mutex, WW_OWNED);
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ww_mutex_state_wait(mutex, WW_WANTOWN);
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
            "locking %p against myself: %p", mutex, curlwp);

        /*
         * Owned by another party, of any priority. Ask that party to
         * wake us when it's done.
         */
        ww_mutex_lock_wait(mutex, ctx);

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        mutex_exit(&mutex->wwm_lock);
}
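
/*
 * Example (illustrative sketch; the objects `a' and `b', their
 * embedded ww_mutex `lock', and the class `res_class' are
 * hypothetical): the intended -EDEADLK backoff protocol.  On -EDEADLK,
 * drop every lock already held, reacquire the contended mutex with
 * ww_mutex_lock_slow, and start over.  A real caller would loop, since
 * reacquiring the other locks can itself fail with -EDEADLK.
 *
 *        struct ww_acquire_ctx ctx;
 *        int error;
 *
 *        ww_acquire_init(&ctx, &res_class);
 *        ww_mutex_lock(&a->lock, &ctx);
 *        error = ww_mutex_lock(&b->lock, &ctx);
 *        if (error == -EDEADLK) {
 *                ww_mutex_unlock(&a->lock);
 *                ww_mutex_lock_slow(&b->lock, &ctx);
 *                ww_mutex_lock(&a->lock, &ctx);	(may fail again; loop)
 *        }
 *        ww_acquire_done(&ctx);
 *        (use a and b)
 *        ww_mutex_unlock(&b->lock);
 *        ww_mutex_unlock(&a->lock);
 *        ww_acquire_fini(&ctx);
 */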

/*
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again, or fail with -EINTR if interrupted by a
 *	signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
        int ret;

        WW_WANTLOCK(mutex);
        ASSERT_SLEEPABLE();

        if (ctx == NULL) {
                ret = ww_mutex_lock_noctx_sig(mutex);
                KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
                goto out;
        }

        KASSERTMSG((ctx->wwx_owner == curlwp),
            "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
        KASSERTMSG((ctx->wwx_acquired != ~0U),
            "ctx %p finished, can't be used any more", ctx);
        KASSERTMSG((ctx->wwx_acquired == 0),
            "ctx %p still holds %u locks, not allowed in slow path",
            ctx, ctx->wwx_acquired);
        KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
            "ctx %p in class %p, mutex %p in class %p",
            ctx, ctx->wwx_class, mutex, mutex->wwm_class);

        mutex_enter(&mutex->wwm_lock);
        ww_acquire_done_check(mutex, ctx);
retry:  switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                mutex->wwm_state = WW_CTX;
                mutex->wwm_u.ctx = ctx;
                goto locked;
        case WW_OWNED:
                KASSERTMSG((mutex->wwm_u.owner != curlwp),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
                if (ret) {
                        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                        goto out_unlock;
                }
                goto retry;
        case WW_CTX:
                break;
        case WW_WANTOWN:
                ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
                if (ret) {
                        KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                        goto out_unlock;
                }
                goto retry;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }

        KASSERT(mutex->wwm_state == WW_CTX);
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
            "locking %p against myself: %p", mutex, curlwp);

        /*
         * Owned by another party, of any priority. Ask that party to
         * wake us when it's done.
         */
        ret = ww_mutex_lock_wait_sig(mutex, ctx);
        if (ret) {
                KASSERTMSG(ret == -EINTR, "ret=%d", ret);
                goto out_unlock;
        }

locked: KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx == ctx);
        WW_LOCKED(mutex);
        ctx->wwx_acquired++;
        ret = 0;
out_unlock:
        mutex_exit(&mutex->wwm_lock);
out:    KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
        return ret;
}

/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire mutex and return 1, but if it can't be done
 *	immediately, return 0.
 */
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
        int ret;

        mutex_enter(&mutex->wwm_lock);
        if (mutex->wwm_state == WW_UNLOCKED) {
                mutex->wwm_state = WW_OWNED;
                mutex->wwm_u.owner = curlwp;
                WW_WANTLOCK(mutex);
                WW_LOCKED(mutex);
                ret = 1;
        } else {
                /*
                 * It is tempting to assert that we do not hold the
                 * mutex here, because trylock when we hold the lock
                 * already generally indicates a bug in the design of
                 * the code. However, it seems that Linux relies on
                 * this deep in ttm buffer reservation logic, so these
                 * assertions are disabled until we find another way to
                 * work around that or fix the bug that leads to it.
                 *
                 * That said: we should not be in the WW_WANTOWN state,
                 * which happens only while we're in the ww mutex logic
                 * waiting to acquire the lock.
                 */
#if 0
                KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
                        (mutex->wwm_u.owner != curlwp)),
                    "locking %p against myself: %p", mutex, curlwp);
                KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
                        (mutex->wwm_u.ctx->wwx_owner != curlwp)),
                    "locking %p against myself: %p", mutex, curlwp);
#endif
                KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
                        (mutex->wwm_u.ctx->wwx_owner != curlwp)),
                    "locking %p against myself: %p", mutex, curlwp);
                ret = 0;
        }
        mutex_exit(&mutex->wwm_lock);

        return ret;
}
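
/*
 * Example (illustrative sketch; `res' is hypothetical): opportunistic
 * use, falling back to a sleeping path when the mutex is contended.
 *
 *        if (ww_mutex_trylock(&res->lock)) {
 *                (fast path)
 *                ww_mutex_unlock(&res->lock);
 *        } else {
 *                (defer, or take the full ww_mutex_lock path)
 *        }
 */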

/*
 * ww_mutex_unlock_release(mutex)
 *
 *	Decrement the number of mutexes acquired in the current locking
 *	context of mutex, which must be held by the caller and in
 *	WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
 *	Caller must hold the internal lock of mutex, and is responsible
 *	for notifying waiters.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

        KASSERT(mutex_owned(&mutex->wwm_lock));
        KASSERT((mutex->wwm_state == WW_CTX) ||
            (mutex->wwm_state == WW_WANTOWN));
        KASSERT(mutex->wwm_u.ctx != NULL);
        KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
            "ww_mutex %p ctx %p held by %p, not by self (%p)",
            mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
            curlwp);
        KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
        mutex->wwm_u.ctx->wwx_acquired--;
        mutex->wwm_u.ctx = NULL;
}

/*
 * ww_mutex_unlock(mutex)
 *
 *	Release mutex and wake the next caller waiting, if any.
 */
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
        struct ww_acquire_ctx *ctx;

        mutex_enter(&mutex->wwm_lock);
        KASSERT(mutex->wwm_state != WW_UNLOCKED);
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
                panic("unlocking unlocked wait/wound mutex: %p", mutex);
        case WW_OWNED:
                /* Let the context lockers fight over it. */
                mutex->wwm_u.owner = NULL;
                mutex->wwm_state = WW_UNLOCKED;
                break;
        case WW_CTX:
                ww_mutex_unlock_release(mutex);
                /*
                 * If there are any waiters with contexts, grant the
                 * lock to the highest-priority one. Otherwise, just
                 * unlock it.
                 */
                if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
                        mutex->wwm_state = WW_CTX;
                        mutex->wwm_u.ctx = ctx;
                } else {
                        mutex->wwm_state = WW_UNLOCKED;
                }
                break;
        case WW_WANTOWN:
                ww_mutex_unlock_release(mutex);
                /* Let the non-context lockers fight over it. */
                mutex->wwm_state = WW_UNLOCKED;
                break;
        }
        WW_UNLOCKED(mutex);
        cv_broadcast(&mutex->wwm_cv);
        mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_locking_ctx(mutex)
 *
 *	Return the current acquire context of mutex. Answer is stale
 *	as soon as returned unless mutex is held by caller.
 */
struct ww_acquire_ctx *
ww_mutex_locking_ctx(struct ww_mutex *mutex)
{
        struct ww_acquire_ctx *ctx;

        mutex_enter(&mutex->wwm_lock);
        switch (mutex->wwm_state) {
        case WW_UNLOCKED:
        case WW_OWNED:
                ctx = NULL;
                break;
        case WW_CTX:
        case WW_WANTOWN:
                ctx = mutex->wwm_u.ctx;
                break;
        default:
                panic("wait/wound mutex %p in bad state: %d",
                    mutex, (int)mutex->wwm_state);
        }
        mutex_exit(&mutex->wwm_lock);

        return ctx;
}
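
/*
 * Example (illustrative sketch; `res' and `ctx' are hypothetical):
 * unlike ww_mutex_is_locked, this can answer `do I hold this mutex?'
 * for context-based locking, by comparing against the caller's own
 * acquire context.
 *
 *        KASSERT(ww_mutex_locking_ctx(&res->lock) == &ctx);
 */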