/*	$NetBSD: kern_mutex.c,v 1.30 2008/01/05 12:31:39 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.30 2008/01/05 12:31:39 ad Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/lock.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
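
/*
 * Illustrative sketch (not a definitive statement of the generated code):
 * with FULL undefined, acquiring and releasing a spin mutex reduces, in
 * effect, to an SPL raise/restore pair using the per-CPU bookkeeping
 * macros defined below:
 *
 *	mutex_spin_enter(mtx)	->	MUTEX_SPIN_SPLRAISE(mtx)
 *	mutex_spin_exit(mtx)	->	MUTEX_SPIN_SPLRESTORE(mtx)
 */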

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __func__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0);

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */
#ifndef MUTEX_COUNT_BIAS
#define	MUTEX_COUNT_BIAS	0
#endif

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int x__cnt, s;						\
	x__cnt = x__ci->ci_mtx_count--;				\
	s = splraiseipl(mtx->mtx_ipl);				\
	if (x__cnt == MUTEX_COUNT_BIAS)				\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)	\
		splx(s);					\
} while (/* CONSTCOND */ 0)
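
/*
 * Illustrative sketch: ci_mtx_count lets nested spin mutexes save and
 * restore the caller's SPL only at the outermost level (assuming the
 * default MUTEX_COUNT_BIAS of 0):
 *
 *	mutex_spin_enter(&a);	ci_mtx_count  0 -> -1, oldspl saved
 *	mutex_spin_enter(&b);	ci_mtx_count -1 -> -2, oldspl untouched
 *	mutex_spin_exit(&b);	ci_mtx_count -2 -> -1, no splx()
 *	mutex_spin_exit(&a);	ci_mtx_count -1 ->  0, splx(oldspl)
 */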

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)			\
do {								\
	if (dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_DEBUG;		\
} while (/* CONSTCOND */ 0);

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)		\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	if (dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_DEBUG;		\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
} while (/* CONSTCOND */ 0);

#define	MUTEX_SPIN_P(mtx)					\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)					\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_DEBUG) != 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_DEBUG) != 0)
#define	MUTEX_INHERITDEBUG(new, old)	(new) |= (old) & MUTEX_BIT_DEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(new, old)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t old = 0;
	uintptr_t new = curthread;

	MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(new, old);
	rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t new;

	MUTEX_GIVE(mtx);
	new = 0;
	MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
	mtx->mtx_owner = new;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	(void *)mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline)) __attribute ((noreturn))
#endif
void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
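
/*
 * Illustrative usage sketch (the softc and lock names are hypothetical):
 * a driver that fields interrupts at IPL_VM might initialize its locks as
 * follows; per the IPL rules above, the first becomes adaptive and the
 * second a spin mutex:
 *
 *	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_VM);
 */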

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	/* See if the target is running on a CPU somewhere. */
	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		goto run;
	for (CPU_INFO_FOREACH(cii, ci))
		if (ci->ci_curlwp == l)
			goto run;

	/* No: it may be safe to block now. */
	*cip = NULL;
	return 0;

 run:
	/* Target is running; do we need to block? */
	*cip = ci;
	return ci->ci_biglock_wanted != l;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin on the lock word and do exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 * ----------------------------	----------------------------
		 *	..			acquire cache line
		 *	..			test for waiters
		 *	acquire cache line  <-	lose cache line
		 *	lock cache line		..
		 *	verify mutex is held	..
		 *	set waiters		..
		 *	unlock cache line	..
		 *	lose cache line	    ->	acquire cache line
		 *	..			clear lock word, waiters
		 *	return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case it is
		 *   interrupted or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to sleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_onproc(owner, &ci)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
#else
	return 1;
#endif
}
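
/*
 * Illustrative usage sketch (the softc and lock names are hypothetical):
 * mutex_owned() is intended for diagnostic assertions rather than for
 * making locking decisions, e.g.
 *
 *	KASSERT(mutex_owned(&sc->sc_lock));
 */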

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
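
/*
 * Illustrative usage sketch (the softc and lock names are hypothetical):
 * callers of mutex_tryenter() must be prepared for it to fail, e.g.
 *
 *	if (!mutex_tryenter(&sc->sc_lock))
 *		return EBUSY;
 *	...
 *	mutex_exit(&sc->sc_lock);
 */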

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin on the lock word and do exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */