/*	$NetBSD: linux_ww_mutex.c,v 1.9 2021/12/19 11:21:20 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.9 2021/12/19 11:21:20 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
#include <linux/errno.h>

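/*
 * Overview of the wwm_state values used below:
 *
 *	WW_UNLOCKED	nobody holds the mutex
 *	WW_OWNED	held directly by an LWP, with no acquire context
 *	WW_CTX		held on behalf of a struct ww_acquire_ctx
 *	WW_WANTOWN	held via a context, but a context-less acquirer
 *			is waiting to take it over
 *
 * wwm_lock serializes all state changes, wwm_cv is broadcast on every
 * unlock, and wwm_waiters holds the contexts of waiting acquirers keyed
 * by ticket, so an unlock from WW_CTX hands the mutex to the waiter
 * with the lowest ticket (the oldest, highest-priority context).
 */
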
#define	WW_WANTLOCK(WW) \
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW), \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW) \
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL, \
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW) \
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW), \
	    (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}

#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile struct ww_mutex *mutex = cookie;

	pr("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		pr("unlocked\n");
		break;
	case WW_OWNED:
		pr("owned by lwp\n");
		pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_CTX:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_WANTOWN:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		pr("unknown\n");
		break;
	}
}

static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif

void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}

void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	int locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	mutex_exit(&mutex->wwm_lock);
}

static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
	return 0;
}
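
/*
 * Caller-side sketch (illustrative only): `struct obj', `obj_ww_class',
 * and obj_lock_pair() are hypothetical and not defined in this file.
 * It shows the intended use of the -EDEADLK return above: drop every
 * lock held under the context, take the contended mutex with
 * ww_mutex_lock_slow(), and then retry the rest.
 *
 *	static void
 *	obj_lock_pair(struct obj *a, struct obj *b)	// a != b
 *	{
 *		struct ww_acquire_ctx ctx;
 *		struct obj *tmp;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &obj_ww_class);
 *
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *		if (ret == -EDEADLK) {
 *			// We hold nothing yet, so just wait our turn.
 *			ww_mutex_lock_slow(&a->lock, &ctx);
 *		}
 *		for (;;) {
 *			ret = ww_mutex_lock(&b->lock, &ctx);
 *			if (ret != -EDEADLK)
 *				break;
 *			// Wounded by an older context: back off and wait.
 *			ww_mutex_unlock(&a->lock);
 *			ww_mutex_lock_slow(&b->lock, &ctx);
 *			tmp = a; a = b; b = tmp;	// held lock stays in a
 *		}
 *		ww_acquire_done(&ctx);
 *		// ... both a->lock and b->lock are held here ...
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_unlock(&b->lock);
 *		ww_acquire_fini(&ctx);
 *	}
 */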

int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		return ww_mutex_lock_noctx_sig(mutex);
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here.  */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		/*
		 * It is tempting to assert that we do not hold the
		 * mutex here, because trylock when we hold the lock
		 * already generally indicates a bug in the design of
		 * the code.  However, it seems that Linux relies on
		 * this deep in ttm buffer reservation logic, so these
		 * assertions are disabled until we find another way to
		 * work around that or fix the bug that leads to it.
		 *
		 * That said: we should not be in the WW_WANTOWN state,
		 * which happens only while we're in the ww mutex logic
		 * waiting to acquire the lock.
		 */
#if 0
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
			(mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
#endif
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}

static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	WW_UNLOCKED(mutex);
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

struct ww_acquire_ctx *
ww_mutex_locking_ctx(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
	case WW_OWNED:
		ctx = NULL;
		break;
	case WW_CTX:
	case WW_WANTOWN:
		ctx = mutex->wwm_u.ctx;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return ctx;
}