/*	$NetBSD: ww_mutex.h,v 1.7 2014/09/15 20:24:55 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ASM_WW_MUTEX_H_
#define _ASM_WW_MUTEX_H_

#include <sys/atomic.h>		/* for atomic_inc_64_nv */
#include <sys/condvar.h>	/* for cv_init, cv_wait, &c. */
#include <sys/lwp.h>		/* for curlwp */
#include <sys/rbtree.h>

#include <linux/mutex.h>

/*
 * A ww_class ties together the mutexes that may be acquired under one
 * acquire context; its wwc_ticket counter issues the tickets that
 * decide who waits and who must back off.
 */
struct ww_class {
	volatile uint64_t wwc_ticket;
};

#define DEFINE_WW_CLASS(CLASS) \
	struct ww_class CLASS = { \
		.wwc_ticket = 0, \
	}
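
/*
 * For illustration, a driver-wide class might be declared as follows
 * (`my_reservation_class' is a hypothetical name, not part of this
 * header):
 *
 *	DEFINE_WW_CLASS(my_reservation_class);
 *
 * All ww_mutexes locked under a single acquire context must belong to
 * the same class.
 */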

struct ww_acquire_ctx {
	struct ww_class *wwx_class __diagused;	/* class of all its locks */
	struct lwp *wwx_owner __diagused;	/* LWP using this context */
	uint64_t wwx_ticket;			/* lower ticket = older = wins */
	unsigned wwx_acquired;			/* number of locks held */
	bool wwx_acquire_done;			/* true once done acquiring */
	struct rb_node wwx_rb_node;		/* entry in a wwm_waiters tree */
};

static inline int
ww_acquire_ctx_compare(void *cookie __unused, const void *va, const void *vb)
{
	const struct ww_acquire_ctx *const ctx_a = va;
	const struct ww_acquire_ctx *const ctx_b = vb;

	if (ctx_a->wwx_ticket < ctx_b->wwx_ticket)
		return -1;
	if (ctx_a->wwx_ticket > ctx_b->wwx_ticket)
		return +1;
	return 0;
}

static inline int
ww_acquire_ctx_compare_key(void *cookie __unused, const void *vn,
    const void *vk)
{
	const struct ww_acquire_ctx *const ctx = vn;
	const uint64_t *const ticketp = vk, ticket = *ticketp;

	if (ctx->wwx_ticket < ticket)
		return -1;
	if (ctx->wwx_ticket > ticket)
		return +1;
	return 0;
}

static const rb_tree_ops_t ww_acquire_ctx_rb_ops = {
	.rbto_compare_nodes = &ww_acquire_ctx_compare,
	.rbto_compare_key = &ww_acquire_ctx_compare_key,
	.rbto_node_offset = offsetof(struct ww_acquire_ctx, wwx_rb_node),
	.rbto_context = NULL,
};
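
/*
 * Note: the waiter tree is keyed on wwx_ticket, so RB_TREE_MIN yields
 * the waiter holding the lowest (oldest, hence highest-priority)
 * ticket; ww_mutex_unlock relies on this to hand the lock off in
 * ticket order.
 */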

static inline void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *class)
{

	ctx->wwx_class = class;
	ctx->wwx_owner = curlwp;
	ctx->wwx_ticket = atomic_inc_64_nv(&class->wwc_ticket);
	ctx->wwx_acquired = 0;
	ctx->wwx_acquire_done = false;
}

static inline void
ww_acquire_done(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);

	ctx->wwx_acquire_done = true;
}

static inline void
ww_acquire_fini(struct ww_acquire_ctx *ctx)
{

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG((ctx->wwx_acquired == 0), "ctx %p still holds %u locks",
	    ctx, ctx->wwx_acquired);

	ctx->wwx_acquired = ~0U;	/* Fail if called again. */
	ctx->wwx_owner = NULL;
}
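
/*
 * Sketch of the acquire-context life cycle (`my_class' and `obj' are
 * illustrative names; see ww_mutex_lock below for the contended
 * cases):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	... ww_mutex_lock(&obj->lock, &ctx) for each lock needed ...
 *	ww_acquire_done(&ctx);		(no more locks will be taken)
 *	... use the locked objects ...
 *	... ww_mutex_unlock(&obj->lock) for each lock held ...
 *	ww_acquire_fini(&ctx);
 */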

struct ww_mutex {
	kmutex_t wwm_lock;		/* interlock for the fields below */
	enum ww_mutex_state {
		WW_UNLOCKED,	/* nobody owns it */
		WW_OWNED,	/* owned by a lwp without a context */
		WW_CTX,		/* owned by a context */
		WW_WANTOWN,	/* owned by ctx, waiters w/o ctx waiting */
	} wwm_state;
	union {
		struct lwp *owner;		/* valid if WW_OWNED */
		struct ww_acquire_ctx *ctx;	/* valid if WW_CTX/WW_WANTOWN */
	} wwm_u;
	struct ww_class *wwm_class;
	struct rb_tree wwm_waiters;	/* waiting contexts, by ticket */
	kcondvar_t wwm_cv;
};
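
/*
 * State transitions, all made while holding wwm_lock:
 *
 *	WW_UNLOCKED -> WW_OWNED		locked by an LWP without a context
 *	WW_UNLOCKED -> WW_CTX		locked by an acquire context
 *	WW_CTX -> WW_WANTOWN		a contextless locker begins waiting
 *	WW_OWNED -> WW_UNLOCKED		contextless unlock
 *	WW_CTX -> WW_CTX		unlock handing off to the oldest
 *					waiting context
 *	WW_CTX -> WW_UNLOCKED		unlock with no waiting contexts
 *	WW_WANTOWN -> WW_UNLOCKED	unlock; all waiters race for it
 */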

static inline void
ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
{

	/*
	 * XXX Apparently Linux takes these with spin locks held.  That
	 * strikes me as a bad idea, but so it is...
	 */
	mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
	mutex->wwm_state = WW_UNLOCKED;
	mutex->wwm_class = class;
	rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
	cv_init(&mutex->wwm_cv, "linuxwwm");
}

static inline void
ww_mutex_destroy(struct ww_mutex *mutex)
{

	cv_destroy(&mutex->wwm_cv);
#if 0	/* rbtree(3) has no destructor; nothing to tear down */
	rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
#endif
	KASSERT(mutex->wwm_state == WW_UNLOCKED);
	mutex_destroy(&mutex->wwm_lock);
}
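
/*
 * Typical embedding (field and class names are illustrative):
 *
 *	struct my_obj {
 *		struct ww_mutex mo_lock;
 *		...
 *	};
 *
 *	ww_mutex_init(&obj->mo_lock, &my_reservation_class);
 *	...
 *	ww_mutex_destroy(&obj->mo_lock);	(must be unlocked)
 */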

/*
 * XXX WARNING: This returns true if it is locked by ANYONE.  Does not
 * mean `Do I hold this lock?' (answering which really requires an
 * acquire context).
 */
static inline bool
ww_mutex_is_locked(struct ww_mutex *mutex)
{
	bool locked;

	mutex_enter(&mutex->wwm_lock);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		locked = false;
		break;
	case WW_OWNED:
	case WW_CTX:
	case WW_WANTOWN:
		locked = true;
		break;
	default:
		panic("wait/wound mutex %p in bad state: %d", mutex,
		    (int)mutex->wwm_state);
	}
	mutex_exit(&mutex->wwm_lock);

	return locked;
}

static inline void
ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
{

	KASSERT(mutex->wwm_state == state);
	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (mutex->wwm_state == state);
}

static inline int
ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
{
	int ret;

	KASSERT(mutex->wwm_state == state);
	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			break;
	} while (mutex->wwm_state == state);

	return ret;
}

static inline void
ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
	while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
}

static inline int
ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *collision __diagused;
	int ret;

	KASSERT(mutex_owned(&mutex->wwm_lock));

	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
	    "ww mutex class mismatch: %p != %p",
	    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx,
	    mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);

	collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
	KASSERTMSG((collision == ctx),
	    "ticket number reused: %"PRIu64" (%p) %"PRIu64" (%p)",
	    ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);

	do {
		/* XXX errno NetBSD->Linux */
		ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
		if (ret)
			goto out;
	} while (!(((mutex->wwm_state == WW_CTX) ||
		    (mutex->wwm_state == WW_WANTOWN)) &&
		(mutex->wwm_u.ctx == ctx)));

out:	rb_tree_remove_node(&mutex->wwm_waiters, ctx);
	return ret;
}

static inline void
ww_mutex_lock_noctx(struct ww_mutex *mutex)
{

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		break;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		KASSERT(mutex->wwm_u.ctx != NULL);
		mutex->wwm_state = WW_WANTOWN;
		/* FALLTHROUGH */
	case WW_WANTOWN:
		KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_OWNED);
	KASSERT(mutex->wwm_u.owner == curlwp);
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline int
ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return 0;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ww_mutex_lock_wait(mutex, ctx);
	}
locked:	ctx->wwx_acquired++;
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	mutex_exit(&mutex->wwm_lock);
	return 0;
}
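
/*
 * Sketch of the backoff protocol for two objects a and b (names
 * illustrative, error handling abbreviated).  -EALREADY means this
 * context already holds the lock; -EDEADLK means the caller lost to an
 * older context and must release everything it holds before waiting:
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	error = ww_mutex_lock(&a->lock, &ctx);
 *	if (error)
 *		...
 *	error = ww_mutex_lock(&b->lock, &ctx);
 *	if (error == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);	(needs 0 locks held)
 *		error = ww_mutex_lock(&a->lock, &ctx);
 *		...
 *	}
 *	ww_acquire_done(&ctx);
 */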

static inline int
ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERT((mutex->wwm_u.ctx == ctx) ||
	    (mutex->wwm_u.ctx->wwx_owner != curlwp));
	if (mutex->wwm_u.ctx == ctx) {
		/*
		 * We already own it.  Yes, this can happen correctly
		 * for objects whose locking order is determined by
		 * userland.
		 */
		mutex_exit(&mutex->wwm_lock);
		return -EALREADY;
	} else if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
		/*
		 * Owned by a higher-priority party.  Tell the caller
		 * to unlock everything and start over.
		 */
		KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
		    "ww mutex class mismatch: %p != %p",
		    ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
		mutex_exit(&mutex->wwm_lock);
		return -EDEADLK;
	} else {
		/*
		 * Owned by a lower-priority party.  Ask that party to
		 * wake us when it is done or it realizes it needs to
		 * back off.
		 */
		ret = ww_mutex_lock_wait_sig(mutex, ctx);
		if (ret)
			goto out;
	}
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}
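
/*
 * Besides -EALREADY and -EDEADLK, the interruptible variants can fail
 * with a negated NetBSD errno from cv_wait_sig, i.e. -EINTR or
 * -ERESTART, so callers must unwind on any nonzero return that is not
 * the -EDEADLK backoff case, e.g.:
 *
 *	error = ww_mutex_lock_interruptible(&obj->lock, &ctx);
 *	if (error == -EDEADLK)
 *		... back off, ww_mutex_lock_slow_interruptible ...
 *	else if (error)
 *		... release everything, ww_acquire_fini, fail ...
 */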

static inline void
ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
{

	ASSERT_SLEEPABLE();

	if (ctx == NULL) {
		ww_mutex_lock_noctx(mutex);
		return;
	}

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ww_mutex_state_wait(mutex, WW_OWNED);
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ww_mutex_state_wait(mutex, WW_WANTOWN);
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ww_mutex_lock_wait(mutex, ctx);
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	mutex_exit(&mutex->wwm_lock);
}

static inline int
ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
    struct ww_acquire_ctx *ctx)
{
	int ret;

	ASSERT_SLEEPABLE();

	if (ctx == NULL)
		return ww_mutex_lock_noctx_sig(mutex);

	KASSERTMSG((ctx->wwx_owner == curlwp),
	    "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
	KASSERTMSG(!ctx->wwx_acquire_done,
	    "ctx %p done acquiring locks, can't acquire more", ctx);
	KASSERTMSG((ctx->wwx_acquired != ~0U),
	    "ctx %p finished, can't be used any more", ctx);
	KASSERTMSG((ctx->wwx_acquired == 0),
	    "ctx %p still holds %u locks, not allowed in slow path",
	    ctx, ctx->wwx_acquired);
	KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
	    "ctx %p in class %p, mutex %p in class %p",
	    ctx, ctx->wwx_class, mutex, mutex->wwm_class);

	mutex_enter(&mutex->wwm_lock);
retry:	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		mutex->wwm_state = WW_CTX;
		mutex->wwm_u.ctx = ctx;
		goto locked;
	case WW_OWNED:
		KASSERTMSG((mutex->wwm_u.owner != curlwp),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
		if (ret)
			goto out;
		goto retry;
	case WW_CTX:
		break;
	case WW_WANTOWN:
		ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
		if (ret)
			goto out;
		goto retry;
	default:
		panic("wait/wound mutex %p in bad state: %d",
		    mutex, (int)mutex->wwm_state);
	}
	KASSERT(mutex->wwm_state == WW_CTX);
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
	    "locking %p against myself: %p", mutex, curlwp);
	/*
	 * Owned by another party, of any priority.  Ask that party to
	 * wake us when it's done.
	 */
	ret = ww_mutex_lock_wait_sig(mutex, ctx);
	if (ret)
		goto out;
locked:	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx == ctx);
	ctx->wwx_acquired++;
	ret = 0;
out:	mutex_exit(&mutex->wwm_lock);
	return ret;
}

static inline int
ww_mutex_trylock(struct ww_mutex *mutex)
{
	int ret;

	mutex_enter(&mutex->wwm_lock);
	if (mutex->wwm_state == WW_UNLOCKED) {
		mutex->wwm_state = WW_OWNED;
		mutex->wwm_u.owner = curlwp;
		ret = 1;
	} else {
		KASSERTMSG(((mutex->wwm_state != WW_OWNED) ||
			(mutex->wwm_u.owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
			(mutex->wwm_u.ctx->wwx_owner != curlwp)),
		    "locking %p against myself: %p", mutex, curlwp);
		ret = 0;
	}
	mutex_exit(&mutex->wwm_lock);

	return ret;
}
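
/*
 * Note that ww_mutex_trylock takes no acquire context: a successful
 * trylock leaves the mutex in the contextless WW_OWNED state, to be
 * released with ww_mutex_unlock.  Like Linux's, it returns 1 on
 * success and 0 on failure:
 *
 *	if (ww_mutex_trylock(&obj->lock)) {
 *		... got it, without blocking ...
 *		ww_mutex_unlock(&obj->lock);
 *	}
 */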

static inline void
ww_mutex_unlock_release(struct ww_mutex *mutex)
{

	KASSERT(mutex_owned(&mutex->wwm_lock));
	KASSERT((mutex->wwm_state == WW_CTX) ||
	    (mutex->wwm_state == WW_WANTOWN));
	KASSERT(mutex->wwm_u.ctx != NULL);
	KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
	    "ww_mutex %p ctx %p held by %p, not by self (%p)",
	    mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
	    curlwp);
	KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
	mutex->wwm_u.ctx->wwx_acquired--;
	mutex->wwm_u.ctx = NULL;
}

static inline void
ww_mutex_unlock(struct ww_mutex *mutex)
{
	struct ww_acquire_ctx *ctx;

	mutex_enter(&mutex->wwm_lock);
	KASSERT(mutex->wwm_state != WW_UNLOCKED);
	switch (mutex->wwm_state) {
	case WW_UNLOCKED:
		panic("unlocking unlocked wait/wound mutex: %p", mutex);
	case WW_OWNED:
		/* Let the context lockers fight over it.  */
		mutex->wwm_u.owner = NULL;
		mutex->wwm_state = WW_UNLOCKED;
		break;
	case WW_CTX:
		ww_mutex_unlock_release(mutex);
		/*
		 * If there are any waiters with contexts, grant the
		 * lock to the highest-priority one.  Otherwise, just
		 * unlock it.
		 */
		if ((ctx = RB_TREE_MIN(&mutex->wwm_waiters)) != NULL) {
			mutex->wwm_state = WW_CTX;
			mutex->wwm_u.ctx = ctx;
		} else {
			mutex->wwm_state = WW_UNLOCKED;
		}
		break;
	case WW_WANTOWN:
		ww_mutex_unlock_release(mutex);
		/* Let the non-context lockers fight over it.  */
		mutex->wwm_state = WW_UNLOCKED;
		break;
	}
	cv_broadcast(&mutex->wwm_cv);
	mutex_exit(&mutex->wwm_lock);
}

#endif	/* _ASM_WW_MUTEX_H_ */