/*	$NetBSD: kern_mutex.c,v 1.59 2014/09/05 05:57:21 matt Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.59 2014/09/05 05:57:21 matt Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/types.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
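
/*
 * Illustrative sketch only (not part of the original sources): with FULL
 * undefined, acquiring and releasing a spin mutex initialized at, say,
 * IPL_VM reduces to roughly
 *
 *	s = splraiseipl(mtx->mtx_ipl);	remember and raise the SPL
 *	...critical section...
 *	splx(s);			drop back to the saved SPL
 *
 * where the save and restore of 's' is handled per-CPU by the
 * MUTEX_SPIN_SPLRAISE() / MUTEX_SPIN_SPLRESTORE() macros below.
 */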

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __func__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else /* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif /* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else /* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif /* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci;					\
	int x__cnt, s;						\
	s = splraiseipl(mtx->mtx_ipl);				\
	x__ci = curcpu();					\
	x__cnt = x__ci->ci_mtx_count--;				\
	__insn_barrier();					\
	if (x__cnt == 0)					\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == 0)			\
		splx(s);					\
} while (/* CONSTCOND */ 0)
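
/*
 * Illustrative note (follows directly from the macros above, not from the
 * original comments): ci_mtx_count counts held spin mutexes downwards from
 * zero, so only the outermost MUTEX_SPIN_SPLRAISE() records the previous
 * SPL and only the matching outermost MUTEX_SPIN_SPLRESTORE() calls splx().
 * With two nested spin mutexes the sequence is roughly:
 *
 *	mutex_spin_enter(&a);	ci_mtx_count  0 -> -1, old SPL saved
 *	mutex_spin_enter(&b);	ci_mtx_count -1 -> -2
 *	mutex_spin_exit(&b);	ci_mtx_count -2 -> -1, SPL unchanged
 *	mutex_spin_exit(&a);	ci_mtx_count -1 ->  0, splx(old SPL)
 */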

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */
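
/*
 * Illustrative sketch only (the real MUTEX_CAS() comes from the
 * machine-dependent headers): on such architectures it behaves roughly
 * like a boolean compare-and-swap on the owner word, e.g.
 *
 *	MUTEX_CAS(p, old, new) ==
 *	    (atomic_cas_ulong((volatile unsigned long *)(p),
 *		(old), (new)) == (old))
 *
 * i.e. it returns non-zero only if *p was atomically changed from
 * 'old' to 'new'.
 */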

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)			\
do {								\
	if (!dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)		\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	if (!dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;		\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_NODEBUG) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	(n) |= (o) & MUTEX_BIT_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t oldown = 0;
	uintptr_t newown = curthread;

	MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(newown, oldown);
	rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t newown;

	MUTEX_GIVE(mtx);
	newown = 0;
	MUTEX_INHERITDEBUG(newown, mtx->mtx_owner);
	mtx->mtx_owner = newown;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

static void	mutex_abort(kmutex_t *, const char *, const char *);
static void	mutex_dump(volatile void *);

lockops_t mutex_spin_lockops = {
	"Mutex",
	LOCKOPS_SPIN,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	LOCKOPS_SLEEP,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	(void *)mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
void __noinline
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
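
#if 0	/* Illustrative example only; not part of the original file. */
/*
 * Sketch of typical usage: an adaptive mutex created at IPL_NONE
 * protecting a counter.  The names example_lock, example_count and the
 * example_*() functions are hypothetical.
 */
static kmutex_t example_lock;
static int example_count;

static void
example_attach(void)
{

	mutex_init(&example_lock, MUTEX_DEFAULT, IPL_NONE);
}

static void
example_bump(void)
{

	mutex_enter(&example_lock);
	example_count++;
	mutex_exit(&example_lock);
}

static void
example_detach(void)
{

	mutex_destroy(&example_lock);
}
#endif	/* 0 */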

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

#ifdef MULTIPROCESSOR
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() for why dereferencing the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word and doing exponential
		 * backoff to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			kpreempt_enable();
			return;
		}
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *	CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 *	--------------------------	--------------------------
		 *		..			acquire cache line
		 *		..			test for waiters
		 *	acquire cache line	<-	lose cache line
		 *	lock cache line			..
		 *	verify mutex is held		..
		 *	set waiters			..
		 *	unlock cache line		..
		 *	lose cache line		->	acquire cache line
		 *		..			clear lock word, waiters
		 *					return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. if interrupted or
		 *   preempted.)
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No operations other than MUTEX_SET_WAITERS() and
		 *   release can modify a mutex with a non-zero owner
		 *   field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to
		 * sleep, as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
#else
	return 1;
#endif
}
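
#if 0	/* Illustrative example only; not part of the original file. */
/*
 * Sketch of the usual mutex_owned() idiom: assert on entry that the
 * caller already holds the lock.  The names example_state_lock and
 * example_flush() are hypothetical.
 */
static kmutex_t example_state_lock;

static void
example_flush(void)
{

	KASSERT(mutex_owned(&example_state_lock));
	/* ...manipulate state protected by example_state_lock... */
}
#endif	/* 0 */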

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
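
#if 0	/* Illustrative example only; not part of the original file. */
/*
 * Sketch of the usual mutex_tryenter() pattern: back off instead of
 * blocking when the lock is contended.  The names example_try_lock and
 * example_poll() are hypothetical.
 */
static kmutex_t example_try_lock;

static bool
example_poll(void)
{

	if (!mutex_tryenter(&example_try_lock))
		return false;	/* contended; caller retries later */
	/* ...do work under example_try_lock... */
	mutex_exit(&example_try_lock);
	return true;
}
#endif	/* 0 */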

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word and doing exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */