/* $NetBSD: kern_mutex.c,v 1.4 2007/02/15 15:49:27 ad Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *        Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *        Richard McDougall.
 */
46
47 #include "opt_multiprocessor.h"
48
49 #define __MUTEX_PRIVATE
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.4 2007/02/15 15:49:27 ad Exp $");
53
54 #include <sys/param.h>
55 #include <sys/proc.h>
56 #include <sys/mutex.h>
57 #include <sys/sched.h>
58 #include <sys/sleepq.h>
59 #include <sys/systm.h>
60 #include <sys/lockdebug.h>
61 #include <sys/kernel.h>
62
63 #include <dev/lockstat.h>
64
65 #include <machine/intr.h>
66
67 /*
68 * When not running a debug kernel, spin mutexes are not much
69 * more than an splraiseipl() and splx() pair.
70 */
71
72 #if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
73 #define FULL
74 #endif
75
76 /*
77 * Debugging support.
78 */
79
80 #define MUTEX_WANTLOCK(mtx) \
81 LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx), \
82 (uintptr_t)__builtin_return_address(0), 0)
83 #define MUTEX_LOCKED(mtx) \
84 LOCKDEBUG_LOCKED(MUTEX_GETID(mtx), \
85 (uintptr_t)__builtin_return_address(0), 0)
86 #define MUTEX_UNLOCKED(mtx) \
87 LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx), \
88 (uintptr_t)__builtin_return_address(0), 0)
89 #define MUTEX_ABORT(mtx, msg) \
90 mutex_abort(mtx, __FUNCTION__, msg)
91
#if defined(LOCKDEBUG)

#define MUTEX_DASSERT(mtx, cond) \
do { \
        if (!(cond)) \
                MUTEX_ABORT(mtx, "assertion failed: " #cond); \
} while (/* CONSTCOND */ 0)

#else /* LOCKDEBUG */

#define MUTEX_DASSERT(mtx, cond) /* nothing */

#endif /* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define MUTEX_ASSERT(mtx, cond) \
do { \
        if (!(cond)) \
                MUTEX_ABORT(mtx, "assertion failed: " #cond); \
} while (/* CONSTCOND */ 0)

#else /* DIAGNOSTIC */

#define MUTEX_ASSERT(mtx, cond) /* nothing */

#endif /* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */

#define MUTEX_SPIN_SPLRAISE(mtx) \
do { \
        struct cpu_info *x__ci = curcpu(); \
        int x__cnt, s; \
        x__cnt = x__ci->ci_mtx_count--; \
        s = splraiseipl(mtx->mtx_ipl); \
        if (x__cnt == 0) \
                x__ci->ci_mtx_oldspl = (s); \
} while (/* CONSTCOND */ 0)

#define MUTEX_SPIN_SPLRESTORE(mtx) \
do { \
        struct cpu_info *x__ci = curcpu(); \
        int s = x__ci->ci_mtx_oldspl; \
        __insn_barrier(); \
        if (++(x__ci->ci_mtx_count) == 0) \
                splx(s); \
} while (/* CONSTCOND */ 0)
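
/*
 * Illustrative sketch (editorial note, not from the original source): the
 * two macros above make spin mutex SPL handling nest per-CPU, so only the
 * outermost acquisition saves the previous SPL and only the outermost
 * release calls splx().  Assuming two already-initialized spin mutexes
 * 'a' and 'b' (hypothetical names), a nested sequence behaves roughly as:
 *
 *        mutex_spin_enter(&a);   ci_mtx_count  0 -> -1, old SPL saved
 *        mutex_spin_enter(&b);   ci_mtx_count -1 -> -2, SPL raised only
 *        mutex_spin_exit(&b);    ci_mtx_count -2 -> -1, no splx()
 *        mutex_spin_exit(&a);    ci_mtx_count -1 ->  0, splx(saved SPL)
 */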

/*
 * For architectures that provide 'simple' mutexes: they supply a CAS
 * function that is either MP-safe, or that does not need to be MP-safe
 * (e.g. on uniprocessor configurations).  Adaptive mutexes on these
 * architectures do not require an additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define MUTEX_OWNER(owner) \
        (owner & MUTEX_THREAD)
#define MUTEX_OWNED(owner) \
        (owner != 0)
#define MUTEX_HAS_WAITERS(mtx) \
        (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define MUTEX_INITIALIZE_ADAPTIVE(mtx, id) \
do { \
        (mtx)->mtx_id = (id); \
} while (/* CONSTCOND */ 0)

#define MUTEX_INITIALIZE_SPIN(mtx, id, ipl) \
do { \
        (mtx)->mtx_owner = MUTEX_BIT_SPIN; \
        (mtx)->mtx_ipl = makeiplcookie((ipl)); \
        (mtx)->mtx_id = (id); \
        __cpu_simple_lock_init(&(mtx)->mtx_lock); \
} while (/* CONSTCOND */ 0)

#define MUTEX_DESTROY(mtx) \
do { \
        (mtx)->mtx_owner = MUTEX_THREAD; \
        (mtx)->mtx_id = -1; \
} while (/* CONSTCOND */ 0)

#define MUTEX_SPIN_P(mtx) \
        (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define MUTEX_ADAPTIVE_P(mtx) \
        (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define MUTEX_GETID(mtx) ((mtx)->mtx_id)

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
        int rv;
        rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
        MUTEX_RECEIVE();
        return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
        int rv;
        rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
        MUTEX_RECEIVE();
        return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
        MUTEX_GIVE();
        mtx->mtx_owner = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
        /* nothing */
}
#endif /* __HAVE_SIMPLE_MUTEXES */
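
/*
 * Editorial sketch, not part of the original source: MUTEX_CAS(),
 * MUTEX_GIVE() and MUTEX_RECEIVE() are supplied by machine-dependent
 * code.  The inlines above only work if MUTEX_CAS() behaves like an
 * atomic compare-and-swap on the owner word that returns non-zero on
 * success, roughly (hypothetical prototype):
 *
 *        int
 *        MUTEX_CAS(volatile uintptr_t *p, uintptr_t old, uintptr_t newval)
 *        {
 *                atomically: if (*p == old) { *p = newval; return 1; }
 *                            else return 0;
 *        }
 *
 * with MUTEX_RECEIVE()/MUTEX_GIVE() providing whatever acquire/release
 * memory barriers the architecture needs (possibly no-ops).
 */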

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef __HAVE_MUTEX_STUBS
#undef __HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter, mutex_vector_enter);
__strong_alias(mutex_exit, mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter, mutex_vector_enter);
__strong_alias(mutex_spin_exit, mutex_vector_exit);
#endif
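
/*
 * Editorial note, not from the original file: __strong_alias() comes from
 * <sys/cdefs.h> and makes its first symbol a strong alias for the second.
 * On a port without assembly fast-path stubs, a call such as (hypothetical
 * lock name)
 *
 *        mutex_enter(&sc_lock);
 *
 * therefore resolves at link time directly to mutex_vector_enter(), with
 * no wrapper in between.
 */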

void mutex_abort(kmutex_t *, const char *, const char *);
void mutex_dump(volatile void *);
int mutex_onproc(uintptr_t, struct cpu_info **);

lockops_t mutex_spin_lockops = {
        "Mutex",
        0,
        mutex_dump
};

lockops_t mutex_adaptive_lockops = {
        "Mutex",
        1,
        mutex_dump
};

/*
 * mutex_dump:
 *
 * Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
        volatile kmutex_t *mtx = cookie;

        printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
            (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
            MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 * Dump information about an error and panic the system.  This
 * generates a lot of machine code in the DIAGNOSTIC case, so
 * we ask the compiler to not inline it.
 */
__attribute ((noinline)) __attribute ((noreturn)) void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

        LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
            &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
        /* NOTREACHED */
}

/*
 * mutex_init:
 *
 * Initialize a mutex for use.  Note that adaptive mutexes are in
 * essence spin mutexes that can sleep to avoid deadlock and wasting
 * CPU time.  We can't easily provide a type of mutex that always
 * sleeps - see comments in mutex_vector_enter() about releasing
 * mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
        u_int id;

        memset(mtx, 0, sizeof(*mtx));

        if (type == MUTEX_DRIVER)
                type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);

        switch (type) {
        case MUTEX_ADAPTIVE:
        case MUTEX_DEFAULT:
                KASSERT(ipl == IPL_NONE);
                id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
                MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
                break;
        case MUTEX_SPIN:
                id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
                MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
                break;
        default:
                panic("mutex_init: impossible type");
                break;
        }
}
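
/*
 * Illustrative usage sketch (editorial, not part of the original source;
 * 'softc', 'sc_lock' and 'sc_count' are hypothetical names).  An adaptive
 * mutex created with MUTEX_DEFAULT must use IPL_NONE, per the KASSERT in
 * mutex_init() above:
 *
 *        struct softc {
 *                kmutex_t sc_lock;
 *                int      sc_count;
 *        } sc;
 *
 *        mutex_init(&sc.sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *
 *        mutex_enter(&sc.sc_lock);
 *        sc.sc_count++;
 *        mutex_exit(&sc.sc_lock);
 *
 *        mutex_destroy(&sc.sc_lock);
 */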

/*
 * mutex_destroy:
 *
 * Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

        if (MUTEX_ADAPTIVE_P(mtx)) {
                MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
                    !MUTEX_HAS_WAITERS(mtx));
        } else {
                MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
        }

        LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
        MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 * Return true if an adaptive mutex owner is running on a CPU in the
 * system.  If the target is waiting on the kernel big lock, then we
 * return false immediately.  This is necessary to avoid deadlock
 * against the big lock.
 *
 * Note that we can't use the mutex owner field as an LWP pointer.  We
 * don't have full control over the timing of our execution, and so the
 * pointer could be completely invalid by the time we dereference it.
 *
 * XXX This should be optimised further to reduce potential cache line
 * ping-ponging and skewing of the spin time while busy waiting.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
        struct lwp *l;

        if (!MUTEX_OWNED(owner))
                return 0;
        l = (struct lwp *)MUTEX_OWNER(owner);

        if ((ci = *cip) != NULL && ci->ci_curlwp == l) {
                mb_read(); /* XXXSMP Very expensive, necessary? */
                return ci->ci_biglock_wanted != l;
        }

        for (CPU_INFO_FOREACH(cii, ci)) {
                if (ci->ci_curlwp == l) {
                        *cip = ci;
                        mb_read(); /* XXXSMP Very expensive, necessary? */
                        return ci->ci_biglock_wanted != l;
                }
        }

        *cip = NULL;
        return 0;
}
#endif

/*
 * mutex_vector_enter:
 *
 * Support routine for mutex_enter() that must handle all cases.  In
 * the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 * fast-path stubs are available.  If a mutex_spin_enter() stub is
 * not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
        uintptr_t owner, curthread;
        turnstile_t *ts;
#ifdef MULTIPROCESSOR
        struct cpu_info *ci = NULL;
        u_int count;
#endif
        LOCKSTAT_COUNTER(spincnt);
        LOCKSTAT_COUNTER(slpcnt);
        LOCKSTAT_TIMER(spintime);
        LOCKSTAT_TIMER(slptime);
        LOCKSTAT_FLAG(lsflag);

        /*
         * Handle spin mutexes.
         */
        if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
                u_int spins = 0;
#endif
                MUTEX_SPIN_SPLRAISE(mtx);
                MUTEX_WANTLOCK(mtx);
#ifdef FULL
                if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
                        MUTEX_LOCKED(mtx);
                        return;
                }
#if !defined(MULTIPROCESSOR)
                MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

                LOCKSTAT_ENTER(lsflag);
                LOCKSTAT_START_TIMER(lsflag, spintime);
                count = SPINLOCK_BACKOFF_MIN;

                /*
                 * Spin, testing the lock word and doing exponential
                 * backoff to reduce cache line ping-ponging between
                 * CPUs.
                 */
                do {
                        if (panicstr != NULL)
                                break;
                        while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
                                SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
                                if (SPINLOCK_SPINOUT(spins))
                                        MUTEX_ABORT(mtx, "spinout");
#endif /* LOCKDEBUG */
                        }
                } while (!__cpu_simple_lock_try(&mtx->mtx_lock));

                if (count != SPINLOCK_BACKOFF_MIN) {
                        LOCKSTAT_STOP_TIMER(lsflag, spintime);
                        LOCKSTAT_EVENT(lsflag, mtx,
                            LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
                }
                LOCKSTAT_EXIT(lsflag);
#endif /* !MULTIPROCESSOR */
#endif /* FULL */
                MUTEX_LOCKED(mtx);
                return;
        }

        curthread = (uintptr_t)curlwp;

        MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
        MUTEX_ASSERT(mtx, curthread != 0);
        MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
        if (panicstr == NULL) {
                simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
                LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
                LOCKDEBUG_BARRIER(NULL, 1);
#endif
        }
#endif

        LOCKSTAT_ENTER(lsflag);

        /*
         * Adaptive mutex; spin trying to acquire the mutex.  If we
         * determine that the owner is not running on a processor,
         * then we stop spinning, and sleep instead.
         */
        for (;;) {
                owner = mtx->mtx_owner;
                if (!MUTEX_OWNED(owner)) {
                        /*
                         * Mutex owner clear could mean two things:
                         *
                         *  * The mutex has been released.
                         *  * The owner field hasn't been set yet.
                         *
                         * Try to acquire it again.  If that fails,
                         * we'll just loop again.
                         */
                        if (MUTEX_ACQUIRE(mtx, curthread))
                                break;
                        continue;
                }

                if (panicstr != NULL)
                        return;
                if (MUTEX_OWNER(owner) == curthread)
                        MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
                /*
                 * Check to see if the owner is running on a processor.
                 * If so, then we should just spin, as the owner will
                 * likely release the lock very soon.
                 */
                if (mutex_onproc(owner, &ci)) {
                        LOCKSTAT_START_TIMER(lsflag, spintime);
                        count = SPINLOCK_BACKOFF_MIN;
                        for (;;) {
                                owner = mtx->mtx_owner;
                                if (!mutex_onproc(owner, &ci))
                                        break;
                                SPINLOCK_BACKOFF(count);
                        }
                        LOCKSTAT_STOP_TIMER(lsflag, spintime);
                        LOCKSTAT_COUNT(spincnt, 1);
                        if (!MUTEX_OWNED(owner))
                                continue;
                }
#endif

                ts = turnstile_lookup(mtx);

                /*
                 * Once we have the turnstile chain interlock, mark the
                 * mutex as having waiters.  If that fails, spin again:
                 * chances are that the mutex has been released.
                 */
                if (!MUTEX_SET_WAITERS(mtx, owner)) {
                        turnstile_exit(mtx);
                        continue;
                }

#ifdef MULTIPROCESSOR
                /*
                 * mutex_exit() is permitted to release the mutex without
                 * any interlocking instructions, and the following can
                 * occur as a result:
                 *
                 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
                 *  ----------------------------    ----------------------------
                 *  ..                              acquire cache line
                 *  ..                              test for waiters
                 *  acquire cache line        <-    lose cache line
                 *  lock cache line                 ..
                 *  verify mutex is held            ..
                 *  set waiters                     ..
                 *  unlock cache line               ..
                 *  lose cache line           ->    acquire cache line
                 *  ..                              clear lock word, waiters
                 *  return success
                 *
                 * There is another race that can occur: a third CPU could
                 * acquire the mutex as soon as it is released.  Since
                 * adaptive mutexes are primarily spin mutexes, this is not
                 * something that we need to worry about too much.  What we
                 * do need to ensure is that the waiters bit gets set.
                 *
                 * To allow the unlocked release, we need to make some
                 * assumptions here:
                 *
                 * o Release is the only non-atomic/unlocked operation
                 *   that can be performed on the mutex.  (It must still
                 *   be atomic on the local CPU, e.g. in case it is
                 *   interrupted or preempted).
                 *
                 * o At any given time, MUTEX_SET_WAITERS() can only ever
                 *   be in progress on one CPU in the system - guaranteed
                 *   by the turnstile chain lock.
                 *
                 * o No other operations other than MUTEX_SET_WAITERS()
                 *   and release can modify a mutex with a non-zero
                 *   owner field.
                 *
                 * o The result of a successful MUTEX_SET_WAITERS() call
                 *   is an unbuffered write that is immediately visible
                 *   to all other processors in the system.
                 *
                 * o If the holding LWP switches away, it posts a store
                 *   fence before changing curlwp, ensuring that any
                 *   overwrite of the mutex waiters flag by mutex_exit()
                 *   completes before the modification of curlwp becomes
                 *   visible to this CPU.
                 *
                 * o cpu_switch() posts a store fence before setting curlwp
                 *   and before resuming execution of an LWP.
                 *
                 * o _kernel_lock() posts a store fence before setting
                 *   curcpu()->ci_biglock_wanted, and after clearing it.
                 *   This ensures that any overwrite of the mutex waiters
                 *   flag by mutex_exit() completes before the modification
                 *   of ci_biglock_wanted becomes visible.
                 *
                 * We now post a read memory barrier (after setting the
                 * waiters field) and check the lock holder's status again.
                 * Some of the possible outcomes (not an exhaustive list):
                 *
                 * 1. The onproc check returns true: the holding LWP is
                 *    running again.  The lock may be released soon and
                 *    we should spin.  Importantly, we can't trust the
                 *    value of the waiters flag.
                 *
                 * 2. The onproc check returns false: the holding LWP is
                 *    not running.  We now have the opportunity to check
                 *    if mutex_exit() has blatted the modifications made
                 *    by MUTEX_SET_WAITERS().
                 *
                 * 3. The onproc check returns false: the holding LWP may
                 *    or may not be running.  It has context switched at
                 *    some point during our check.  Again, we have the
                 *    chance to see if the waiters bit is still set or
                 *    has been overwritten.
                 *
                 * 4. The onproc check returns false: the holding LWP is
                 *    running on a CPU, but wants the big lock.  It's OK
                 *    to check the waiters field in this case.
                 *
                 * 5. The has-waiters check fails: the mutex has been
                 *    released, the waiters flag cleared and another LWP
                 *    now owns the mutex.
                 *
                 * 6. The has-waiters check fails: the mutex has been
                 *    released.
                 *
                 * If the waiters bit is not set it's unsafe to go to
                 * sleep, as we might never be awoken.
                 */
                mb_read();
                if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
                        turnstile_exit(mtx);
                        continue;
                }
#endif /* MULTIPROCESSOR */

                LOCKSTAT_START_TIMER(lsflag, slptime);

                turnstile_block(ts, TS_WRITER_Q, mtx);

                LOCKSTAT_STOP_TIMER(lsflag, slptime);
                LOCKSTAT_COUNT(slpcnt, 1);

                turnstile_unblock();
        }

        LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
            slpcnt, slptime);
        LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
            spincnt, spintime);
        LOCKSTAT_EXIT(lsflag);

        MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
        MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 * Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
        turnstile_t *ts;
        uintptr_t curthread;

        if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
                if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
                        MUTEX_ABORT(mtx, "exiting unheld spin mutex");
                MUTEX_UNLOCKED(mtx);
                __cpu_simple_unlock(&mtx->mtx_lock);
#endif
                MUTEX_SPIN_SPLRESTORE(mtx);
                return;
        }

        if (__predict_false(panicstr != NULL) || __predict_false(cold)) {
                MUTEX_UNLOCKED(mtx);
                MUTEX_RELEASE(mtx);
                return;
        }

        curthread = (uintptr_t)curlwp;
        MUTEX_DASSERT(mtx, curthread != 0);
        MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
        MUTEX_UNLOCKED(mtx);

        /*
         * Get this lock's turnstile.  This gets the interlock on
         * the sleep queue.  Once we have that, we can clear the
         * lock.  If there was no turnstile for the lock, there
         * were no waiters remaining.
         */
        ts = turnstile_lookup(mtx);

        if (ts == NULL) {
                MUTEX_RELEASE(mtx);
                turnstile_exit(mtx);
        } else {
                MUTEX_RELEASE(mtx);
                turnstile_wakeup(ts, TS_WRITER_Q,
                    TS_WAITERS(ts, TS_WRITER_Q), NULL);
        }
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 * Support routine for mutex_exit() that wakes up all waiters.
 * We assume that the mutex has been released, but it need not
 * be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
        turnstile_t *ts;

        ts = turnstile_lookup(mtx);
        if (ts == NULL) {
                turnstile_exit(mtx);
                return;
        }
        MUTEX_CLEAR_WAITERS(mtx);
        turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif /* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 * Return true if the current LWP (adaptive) or CPU (spin)
 * holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

        if (MUTEX_ADAPTIVE_P(mtx))
                return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
        return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
#else
        return 1;
#endif
}
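
/*
 * Editorial sketch, not part of the original source: mutex_owned() is
 * normally used in diagnostic assertions rather than to make locking
 * decisions, e.g. (hypothetical structure, field and function names):
 *
 *        void
 *        foo_update(struct foo *f)
 *        {
 *
 *                KASSERT(mutex_owned(&f->f_lock));
 *                f->f_state++;
 *        }
 */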

/*
 * mutex_owner:
 *
 * Return the current owner of an adaptive mutex.
 */
struct lwp *
mutex_owner(kmutex_t *mtx)
{

        MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
        return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 * Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
        uintptr_t curthread;

        /*
         * Handle spin mutexes.
         */
        if (MUTEX_SPIN_P(mtx)) {
                MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
                if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
                        MUTEX_WANTLOCK(mtx);
                        MUTEX_LOCKED(mtx);
                        return 1;
                }
                MUTEX_SPIN_SPLRESTORE(mtx);
#else
                MUTEX_WANTLOCK(mtx);
                MUTEX_LOCKED(mtx);
                return 1;
#endif
        } else {
                curthread = (uintptr_t)curlwp;
                MUTEX_ASSERT(mtx, curthread != 0);
                if (MUTEX_ACQUIRE(mtx, curthread)) {
                        MUTEX_WANTLOCK(mtx);
                        MUTEX_LOCKED(mtx);
                        MUTEX_DASSERT(mtx,
                            MUTEX_OWNER(mtx->mtx_owner) == curthread);
                        return 1;
                }
        }

        return 0;
}
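
/*
 * Editorial sketch of the usual try-lock pattern, not part of the original
 * source ('sc' and 'sc_lock' are hypothetical names):
 *
 *        if (mutex_tryenter(&sc->sc_lock)) {
 *                ... do the work with the lock held ...
 *                mutex_exit(&sc->sc_lock);
 *        } else {
 *                ... busy: defer the work or take a slower path ...
 *        }
 */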

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 * Support routine for mutex_spin_enter().  Assumes that the caller
 * has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
        u_int count;
        LOCKSTAT_TIMER(spintime);
        LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
        u_int spins = 0;
#endif /* LOCKDEBUG */

        MUTEX_WANTLOCK(mtx);

        LOCKSTAT_ENTER(lsflag);
        LOCKSTAT_START_TIMER(lsflag, spintime);
        count = SPINLOCK_BACKOFF_MIN;

        /*
         * Spin, testing the lock word and doing exponential backoff
         * to reduce cache line ping-ponging between CPUs.
         */
        do {
                if (panicstr != NULL)
                        break;
                while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
                        SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
                        if (SPINLOCK_SPINOUT(spins))
                                MUTEX_ABORT(mtx, "spinout");
#endif /* LOCKDEBUG */
                }
        } while (!__cpu_simple_lock_try(&mtx->mtx_lock));

        LOCKSTAT_STOP_TIMER(lsflag, spintime);
        LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
        LOCKSTAT_EXIT(lsflag);

        MUTEX_LOCKED(mtx);
#else /* MULTIPROCESSOR */
        MUTEX_ABORT(mtx, "locking against myself");
#endif /* MULTIPROCESSOR */
}
#endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

/*
 * sched_lock_idle:
 *
 * XXX Ugly hack for cpu_switch().
 */
void
sched_lock_idle(void)
{
#ifdef FULL
        kmutex_t *mtx = &sched_mutex;

        curcpu()->ci_mtx_count--;

        if (!__cpu_simple_lock_try(&mtx->mtx_lock)) {
                mutex_spin_retry(mtx);
                return;
        }

        MUTEX_LOCKED(mtx);
#else
        curcpu()->ci_mtx_count--;
#endif /* FULL */
}

/*
 * sched_unlock_idle:
 *
 * XXX Ugly hack for cpu_switch().
 */
void
sched_unlock_idle(void)
{
#ifdef FULL
        kmutex_t *mtx = &sched_mutex;

        if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
                MUTEX_ABORT(mtx, "sched_unlock_idle");

        MUTEX_UNLOCKED(mtx);
        __cpu_simple_unlock(&mtx->mtx_lock);
#endif /* FULL */
        curcpu()->ci_mtx_count++;
}