/*	$NetBSD: kern_mutex.c,v 1.45 2009/01/25 04:45:14 rmind Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.45 2009/01/25 04:45:14 rmind Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/pool.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

#include "opt_sa.h"

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), false, false)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __func__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0);

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif /* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */
#ifndef MUTEX_COUNT_BIAS
#define	MUTEX_COUNT_BIAS	0
#endif

#define	MUTEX_SPIN_SPLRAISE(mtx)					\
do {									\
	struct cpu_info *x__ci;						\
	int x__cnt, s;							\
	s = splraiseipl(mtx->mtx_ipl);					\
	x__ci = curcpu();						\
	x__cnt = x__ci->ci_mtx_count--;					\
	__insn_barrier();						\
	if (x__cnt == MUTEX_COUNT_BIAS)					\
		x__ci->ci_mtx_oldspl = (s);				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int s = x__ci->ci_mtx_oldspl;					\
	__insn_barrier();						\
	if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)		\
		splx(s);						\
} while (/* CONSTCOND */ 0)
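
/*
 * Added note (illustrative, not text from the original source):
 * ci_mtx_count tracks per-CPU spin mutex nesting.  Only the outermost
 * MUTEX_SPIN_SPLRAISE() records the previous SPL, and only the matching
 * outermost MUTEX_SPIN_SPLRESTORE() lowers it again.  With the default
 * MUTEX_COUNT_BIAS of 0, a nested sequence looks roughly like:
 *
 *	mutex_spin_enter(&a);	count 0 -> -1, old SPL saved
 *	mutex_spin_enter(&b);	count -1 -> -2, SPL stays raised
 *	mutex_spin_exit(&b);	count -2 -> -1, no splx()
 *	mutex_spin_exit(&a);	count -1 -> 0, splx(old SPL)
 */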

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)						\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)						\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)				\
do {									\
	if (dodebug)							\
		(mtx)->mtx_owner |= MUTEX_BIT_DEBUG;			\
} while (/* CONSTCOND */ 0);

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)			\
do {									\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;				\
	if (dodebug)							\
		(mtx)->mtx_owner |= MUTEX_BIT_DEBUG;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));				\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)						\
do {									\
	(mtx)->mtx_owner = MUTEX_THREAD;				\
} while (/* CONSTCOND */ 0);

#define	MUTEX_SPIN_P(mtx)						\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)						\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_DEBUG) != 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_DEBUG) != 0)
#define	MUTEX_INHERITDEBUG(new, old)	(new) |= (old) & MUTEX_BIT_DEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(new, old)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t old = 0;
	uintptr_t new = curthread;

	MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(new, old);
	rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t new;

	MUTEX_GIVE(mtx);
	new = 0;
	MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
	mtx->mtx_owner = new;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif	/* __HAVE_SIMPLE_MUTEXES */
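
/*
 * Added note (an assumption drawn from the macros above, not text from
 * the original source): with __HAVE_SIMPLE_MUTEXES the entire lock state
 * lives in mtx_owner.  The owning LWP pointer occupies the MUTEX_THREAD
 * bits, while the low bits carry MUTEX_BIT_SPIN, MUTEX_BIT_WAITERS and
 * MUTEX_BIT_DEBUG, so an uncontended acquire is a single compare-and-swap,
 * roughly:
 *
 *	MUTEX_CAS(&mtx->mtx_owner, 0 | debug_bit, curlwp | debug_bit)
 *
 * which is exactly what MUTEX_ACQUIRE() above performs.
 */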

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);

lockops_t mutex_spin_lockops = {
	"Mutex",
	LOCKOPS_SPIN,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	LOCKOPS_SLEEP,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	(void *)mutex_owner,
};

/* Mutex cache */
#define	MUTEX_OBJ_MAGIC	0x5aa3c85d
struct kmutexobj {
	kmutex_t	mo_lock;
	u_int		mo_magic;
	u_int		mo_refcnt;
};

static int	mutex_obj_ctor(void *, void *, int);

static pool_cache_t	mutex_obj_cache;

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
void __noinline
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
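
/*
 * Illustrative usage (an added note, not from the original source):
 * callers normally pass MUTEX_DEFAULT and let the IPL select the
 * behaviour, per the switch above, e.g.:
 *
 *	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);	adaptive
 *	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_VM);	spin
 *
 * where sc_lock and sc_intr_lock name hypothetical driver softc mutexes.
 */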

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	/* See if the target is running on a CPU somewhere. */
	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		goto run;
	for (CPU_INFO_FOREACH(cii, ci))
		if (ci->ci_curlwp == l)
			goto run;

	/* No: it may be safe to block now. */
	*cip = NULL;
	return 0;

 run:
	/* Target is running; do we need to block? */
	*cip = ci;
	return ci->ci_biglock_wanted != l;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
#ifdef KERN_SA
	int f;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin testing the lock word and do exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}

		if (__predict_false(panicstr != NULL))
			return;
		if (__predict_false(MUTEX_OWNER(owner) == curthread))
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				SPINLOCK_BACKOFF(count);
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *	       ..		acquire cache line
		 *	       ..		test for waiters
		 *	acquire cache line   <-	lose cache line
		 *	lock cache line		       ..
		 *	verify mutex is held	       ..
		 *	set waiters		       ..
		 *	unlock cache line	       ..
		 *	lose cache line	      ->	acquire cache line
		 *	       ..		clear lock word, waiters
		 *	return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case interrupted
		 *   or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to sleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_onproc(owner, &ci)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */

#ifdef KERN_SA
		/*
		 * Sleeping for a mutex should not generate an upcall.
		 * So set LP_SA_NOBLOCK to indicate this.
		 * f indicates if we should clear LP_SA_NOBLOCK when done.
		 */
		f = ~curlwp->l_pflag & LP_SA_NOBLOCK;
		curlwp->l_pflag |= LP_SA_NOBLOCK;
#endif /* KERN_SA */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

#ifdef KERN_SA
		curlwp->l_pflag ^= f;
#endif /* KERN_SA */

		owner = mtx->mtx_owner;
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
#else
	return 1;
#endif
}
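
/*
 * Illustrative usage (an added note, not from the original source):
 * mutex_owned() is normally used in assertions rather than to make
 * locking decisions, e.g.:
 *
 *	KASSERT(mutex_owned(&sc->sc_lock));
 *
 * where sc_lock names a hypothetical driver softc mutex.
 */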

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin testing the lock word and do exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

/*
 * mutex_obj_init:
 *
 *	Initialize the mutex object store.
 */
void
mutex_obj_init(void)
{

	mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
	    coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
	    NULL, NULL);
}

/*
 * mutex_obj_ctor:
 *
 *	Initialize a new lock for the cache.
 */
static int
mutex_obj_ctor(void *arg, void *obj, int flags)
{
	struct kmutexobj * mo = obj;

	mo->mo_magic = MUTEX_OBJ_MAGIC;

	return 0;
}

/*
 * mutex_obj_alloc:
 *
 *	Allocate a single lock object.
 */
kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
	mutex_init(&mo->mo_lock, type, ipl);
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}

/*
 * mutex_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
	KASSERT(mo->mo_refcnt > 0);

	atomic_inc_uint(&mo->mo_refcnt);
}

/*
 * mutex_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
	KASSERT(mo->mo_refcnt > 0);

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);
	return true;
}
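
/*
 * Illustrative lifecycle (an added note, not from the original source):
 * a subsystem sharing one lock between several structures might do:
 *
 *	lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);	refcnt == 1
 *	mutex_obj_hold(lock);					refcnt == 2
 *	...
 *	(void)mutex_obj_free(lock);	returns false, refcnt == 1
 *	(void)mutex_obj_free(lock);	returns true, lock destroyed
 */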