/*	$NetBSD: linux_ww_mutex.c,v 1.13 2021/12/26 16:14:34 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.13 2021/12/26 16:14:34 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lockdebug.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>
#include <linux/errno.h>

#define	WW_WANTLOCK(WW)						\
	LOCKDEBUG_WANTLOCK((WW)->wwm_debug, (WW),		\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_LOCKED(WW)						\
	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,		\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	WW_UNLOCKED(WW)						\
	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),		\
	    (uintptr_t)__builtin_return_address(0), 0)

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic64_inc_return(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

static void
ww_acquire_done_check(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/*
	 * If caller has invoked ww_acquire_done, we must already hold
	 * this mutex.
	 */
	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERTMSG((!ctx->wwx_acquire_done ||
		(mutex->wwm_state == WW_CTX && mutex->wwm_u.ctx == ctx)),
	    "ctx %p done acquiring locks, refusing to acquire %p",
	    ctx, mutex);
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}

#ifdef LOCKDEBUG
static void
ww_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile struct ww_mutex *mutex = cookie;

	pr("%-13s: ", "state");
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		pr("unlocked\n");
		break;
	case WW_OWNED:
		pr("owned by lwp\n");
		pr("%-13s: %p\n", "owner", mutex->wwm_u.owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_CTX:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters",
		    cv_has_waiters((void *)(intptr_t)&mutex->wwm_cv)
			? "yes" : "no");
		break;
	case WW_WANTOWN:
		pr("owned via ctx\n");
		pr("%-13s: %p\n", "context", mutex->wwm_u.ctx);
		pr("%-13s: %p\n", "lwp",
		    mutex->wwm_u.ctx->wwx_owner);
		pr("%-13s: %s\n", "waiters", "yes (noctx)");
		break;
	default:
		pr("unknown\n");
		break;
	}
}

static lockops_t ww_lockops = {
	.lo_name = "Wait/wound mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = ww_dump,
};
#endif

/*
 * ww_mutex_init(mutex, class)
 *
 *	Initialize mutex in the given class.  Must precede any other
 *	ww_mutex_* operations.  After done, mutex must be destroyed
 *	with ww_mutex_destroy.
 */
void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
#ifdef LOCKDEBUG
	mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
	    (uintptr_t)__builtin_return_address(0));
#endif
}

/*
 * ww_mutex_destroy(mutex)
 *
 *	Destroy mutex initialized by ww_mutex_init.  Caller must not
 *	use mutex with any other ww_mutex_* operations afterward,
 *	except after reinitializing with ww_mutex_init.
 */
void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	KASSERT(mutex->wwm_state == WW_UNLOCKED);

#ifdef LOCKDEBUG
	LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
#endif
	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * ww_mutex_is_locked(mutex)
 *
 *	True if anyone holds mutex locked at the moment, false if not.
 *	Answer is stale as soon as it is returned unless mutex is held
 *	by caller.
 *
 *	XXX WARNING: This returns true if it is locked by ANYONE.  Does
 *	not mean `Do I hold this lock?' (answering which really
 *	requires an acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	int locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

/*
 * ww_mutex_state_wait(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state.  Uninterruptible; never fails.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

/*
 * ww_mutex_state_wait_sig(mutex, state)
 *
 *	Wait for mutex, which must be in the given state, to transition
 *	to another state, or fail if interrupted by a signal.  Return 0
 *	on success, -EINTR if interrupted by a signal.
 *
 *	Caller must hold mutex's internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			ret = -EINTR;
			break;
		}
	} while (mutex->wwm_state == state);

	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock_wait(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex.  While waiting, record ctx in the tree of waiters.  Does
 *	not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

/*
 * ww_mutex_lock_wait_sig(mutex, ctx)
 *
 *	With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 *	by another thread with an acquire context, wait to acquire
 *	mutex and return 0, or return -EINTR if interrupted by a
 *	signal.  While waiting, record ctx in the tree of waiters.
 *	Does not update the mutex state otherwise.
 *
 *	Caller must not already hold mutex.  Caller must hold mutex's
 *	internal lock.
 *
 *	May sleep.
 *
 *	Internal subroutine.
 */
static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret) {
			KASSERTMSG((ret == -EINTR || ret == -ERESTART),
			    "ret=%d", ret);
			ret = -EINTR;
			goto out;
		}
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock_noctx(mutex)
 *
 *	Acquire mutex without an acquire context.  Caller must not
 *	already hold the mutex.  Uninterruptible; never fails.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing ww_mutex_lock(..., NULL).
 */
static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_noctx_sig(mutex)
 *
 *	Acquire mutex without an acquire context and return 0, or fail
 *	and return -EINTR if interrupted by a signal.  Caller must not
 *	already hold the mutex.
 *
 *	May sleep.
 *
 *	Internal subroutine, implementing
 *	ww_mutex_lock_interruptible(..., NULL).
 */
static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	WW_LOCKED(mutex);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible.
 *
 *	- If ctx is null, caller must not hold mutex, and ww_mutex_lock
 *	  always succeeds and returns 0.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *
 *	May sleep.
 */
int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ww_mutex_lock_noctx(mutex);
		ret = 0;
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
	    "ret=%d", ret);
	return ret;
}
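
/*
 * Example usage (an illustrative sketch, not part of this file): the
 * standard wait/wound pattern for taking two locks of the same class
 * in an order chosen at run time.  example_lock_pair and its arguments
 * are hypothetical; only the ww_* calls are the entry points defined
 * in this file.
 */
#if 0
static void
example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
    struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret == -EDEADLK) {
		/* We hold nothing yet, so just wait for the owner.  */
		ww_mutex_lock_slow(a, &ctx);
	}
	while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		struct ww_mutex *tmp;

		/*
		 * An older context owns b: release the lock we hold,
		 * wait for b in the slow path (allowed only with no
		 * locks held), then retry with the roles swapped.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		tmp = a;
		a = b;
		b = tmp;
	}
	KASSERT(ret == 0);
	ww_acquire_done(&ctx);	/* no further locks in this ctx */

	/* ... critical section over both objects ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
#endif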

/*
 * ww_mutex_lock_interruptible(mutex, ctx)
 *
 *	Lock the mutex and return 0, or fail if impossible or
 *	interrupted.
 *
 *	- If ctx is null, caller must not hold mutex; acquire it and
 *	  return 0, or fail with -EINTR if interrupted by a signal.
 *
 *	- If ctx is nonnull, then:
 *	  . Fail with -EALREADY if caller already holds mutex.
 *	  . Fail with -EDEADLK if someone else holds mutex but there is
 *	    a cycle.
 *	  . Fail with -EINTR if interrupted by a signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	/*
	 * We do not WW_WANTLOCK at the beginning because we may
	 * correctly already hold it, if we have a context, in which
	 * case we must return EALREADY to the caller.
	 */
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		WW_WANTLOCK(mutex);
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		WW_WANTLOCK(mutex);
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		WW_WANTLOCK(mutex);
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));

	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		ret = -EALREADY;
		goto out_unlock;
	}

	/*
	 * We do not own it.  We can safely assert to LOCKDEBUG that we
	 * want it.
	 */
	WW_WANTLOCK(mutex);

	if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		ret = -EDEADLK;
		goto out_unlock;
	}

	/*
	 * Owned by a lower-priority party.  Ask that party to wake us
	 * when it is done or it realizes it needs to back off.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
		ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_lock_slow(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again.
 *
 *	Uninterruptible; never fails.
 *
 *	May sleep.
 */
void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	/* Caller must not try to lock against self here.  */
	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 *
 *	Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 *	after the caller has ditched all its locks, wait for the owner
 *	of mutex to relinquish mutex before the caller can start over
 *	acquiring locks again, or fail with -EINTR if interrupted by a
 *	signal.
 *
 *	May sleep.
 */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	WW_WANTLOCK(mutex);
	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ret = ww_mutex_lock_noctx_sig(mutex);
		KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
		goto out;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
	ww_acquire_done_check(mutex, ctx);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret) {
			KASSERTMSG(ret == -EINTR, "ret=%d", ret);
			goto out_unlock;
		}
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}

	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);

	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret) {
		KASSERTMSG(ret == -EINTR, "ret=%d", ret);
		goto out_unlock;
	}

locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	WW_LOCKED(mutex);
	ctx->wwx_acquired++;
	ret = 0;
out_unlock:
	mutex_exit(&mutex->wwm_lock);
out:	KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
	return ret;
}

/*
 * ww_mutex_trylock(mutex)
 *
 *	Try to acquire mutex and return 1, but if it can't be done
 *	immediately, return 0.
 */
int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		WW_WANTLOCK(mutex);
		WW_LOCKED(mutex);
		ret = 1;
	} else {
		/*
		 * It is tempting to assert that we do not hold the
		 * mutex here, because trylock when we hold the lock
		 * already generally indicates a bug in the design of
		 * the code.  However, it seems that Linux relies on
		 * this deep in ttm buffer reservation logic, so these
		 * assertions are disabled until we find another way to
		 * work around that or fix the bug that leads to it.
		 *
		 * That said: we should not be in the WW_WANTOWN state,
		 * which happens only while we're in the ww mutex logic
		 * waiting to acquire the lock.
		 */
#if 0
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
			(mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
#endif
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
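
/*
 * Example usage (an illustrative sketch only): opportunistic locking
 * with ww_mutex_trylock.  Note the Linux return convention: nonzero
 * means the lock was taken.  obj and examine_object are hypothetical;
 * only the ww_mutex_* calls are defined in this file.
 */
#if 0
	if (ww_mutex_trylock(&obj->lock)) {
		/* Fast path: we now hold it in the WW_OWNED state.  */
		examine_object(obj);
		ww_mutex_unlock(&obj->lock);
	} else {
		/* Contended: fall back to a sleeping acquire.  */
		ww_mutex_lock(&obj->lock, NULL);
		examine_object(obj);
		ww_mutex_unlock(&obj->lock);
	}
#endif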

/*
 * ww_mutex_unlock_release(mutex)
 *
 *	Decrement the number of mutexes acquired in the current locking
 *	context of mutex, which must be held by the caller and in
 *	WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
 *	Caller must hold the internal lock of mutex, and is responsible
 *	for notifying waiters.
 *
 *	Internal subroutine.
 */
static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

/*
 * ww_mutex_unlock(mutex)
 *
 *	Release mutex and wake the next caller waiting, if any.
 */
void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	WW_UNLOCKED(mutex);
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

/*
 * ww_mutex_locking_ctx(mutex)
 *
 *	Return the current acquire context of mutex.  Answer is stale
 *	as soon as returned unless mutex is held by caller.
 */
struct ww_acquire_ctx *
ww_mutex_locking_ctx(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
	case WW_OWNED:
		ctx = NULL;
		break;
	case WW_CTX:
	case WW_WANTOWN:
		ctx = mutex->wwm_u.ctx;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return ctx;
}
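
/*
 * Example usage (an illustrative sketch only): unlike
 * ww_mutex_is_locked, ww_mutex_locking_ctx can answer `do I hold this
 * lock?' for a context locker.  The helper below is hypothetical; the
 * answer is stable only for the caller's own ctx, because a context's
 * locks can be released by no one but its owner lwp.
 */
#if 0
static bool
example_ww_mutex_held(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	KASSERT(ctx->wwx_owner == curlwp);
	return ww_mutex_locking_ctx(mutex) == ctx;
}
#endif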