/*	$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.1 2015/01/08 23:35:47 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rbtree.h>

#include <linux/ww_mutex.h>

static int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}
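
/*
 * Example (an illustrative sketch, not part of this file): the intended
 * lifecycle of an acquire context, following the Linux ww_mutex
 * convention.  The mutexes `m1' and `m2' and the class `example_class'
 * are hypothetical names.
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &example_class);
 *	ww_mutex_lock(&m1, &ctx);
 *	ww_mutex_lock(&m2, &ctx);	// back off on -EDEADLK; see below
 *	ww_acquire_done(&ctx);		// no more locks will be acquired
 *	// ... use the locked objects ...
 *	ww_mutex_unlock(&m2);
 *	ww_mutex_unlock(&m1);
 *	ww_acquire_fini(&ctx);		// all locks must be released first
 */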

void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
}

void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	cv_destroy(&mutex->wwm_cv);
#if 0
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	bool locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

static void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

static void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

static void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}

static int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ww_mutex_lock_wait(mutex, ctx);
	}
locked:	ctx->wwx_acquired++;
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}
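
/*
 * Example (an illustrative sketch of the assumed caller pattern): the
 * wait/wound backoff loop.  On -EDEADLK the caller must release every
 * lock held under the context and reacquire the contended mutex with
 * ww_mutex_lock_slow, which waits unconditionally, before retrying the
 * rest.  `m1' and `m2' are hypothetical mutexes; ww_acquire_init has
 * already been called on `ctx'.
 *
 *	int error;
 *
 *	ww_mutex_lock(&m1, &ctx);
 *	error = ww_mutex_lock(&m2, &ctx);
 *	if (error == -EDEADLK) {
 *		ww_mutex_unlock(&m1);
 *		ww_mutex_lock_slow(&m2, &ctx);
 *		error = ww_mutex_lock(&m1, &ctx);
 *		// a fresh -EDEADLK here would mean backing off again
 *	}
 */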

int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ret = ww_mutex_lock_wait_sig(mutex, ctx);
		if (ret)
			goto out;
	}
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		ret = 1;
	} else {
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
			(mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
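
/*
 * Note (illustrative): unlike the lock operations above, which return
 * zero or a negative errno, this follows Linux's trylock convention --
 * nonzero (1) on success, 0 on failure.  A hypothetical caller, with
 * `m' an assumed mutex:
 *
 *	if (ww_mutex_trylock(&m)) {
 *		// got it without blocking
 *		...
 *		ww_mutex_unlock(&m);
 *	} else {
 *		// contended; fall back to ww_mutex_lock(&m, &ctx)
 *	}
 */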

static void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}