/*	$NetBSD: kern_mutex.c,v 1.11 2007/03/10 16:01:13 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#include "opt_multiprocessor.h"

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.11 2007/03/10 16:01:13 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>

#include <dev/lockstat.h>

#include <machine/intr.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
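
/*
 * Illustrative sketch (not part of the original source): with FULL not
 * defined, the spin mutex paths in mutex_vector_enter() and
 * mutex_vector_exit() below reduce to roughly the following - no lock
 * word is touched, only the SPL is raised and later restored by the
 * MUTEX_SPIN_SPLRAISE()/MUTEX_SPIN_SPLRESTORE() macros defined further
 * down in this file.
 */
#if 0
	/* mutex_spin_enter(mtx), !FULL case: */
	MUTEX_SPIN_SPLRAISE(mtx);	/* splraiseipl(mtx->mtx_ipl) */
	MUTEX_WANTLOCK(mtx);		/* no-op without LOCKDEBUG */
	MUTEX_LOCKED(mtx);		/* no-op without LOCKDEBUG */

	/* mutex_spin_exit(mtx), !FULL case: */
	MUTEX_SPIN_SPLRESTORE(mtx);	/* splx() at the outermost release */
#endif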

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
	LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
	LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
	LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
	mutex_abort(mtx, __FUNCTION__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else /* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif /* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else /* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif /* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int x__cnt, s;						\
	x__cnt = x__ci->ci_mtx_count--;				\
	s = splraiseipl(mtx->mtx_ipl);				\
	if (x__cnt == 0)					\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == 0)			\
		splx(s);					\
} while (/* CONSTCOND */ 0)
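
/*
 * Illustrative sketch (not part of the original source): how the SPL
 * save/restore pairs above nest.  Only the outermost
 * MUTEX_SPIN_SPLRAISE() records the previous SPL in ci_mtx_oldspl, and
 * only the matching outermost MUTEX_SPIN_SPLRESTORE() calls splx().
 * The function and lock names below are hypothetical.
 */
#if 0
static void
example_nested_spin(void)
{
	static kmutex_t slock_a, slock_b;	/* hypothetical locks */

	mutex_init(&slock_a, MUTEX_SPIN, IPL_BIO);
	mutex_init(&slock_b, MUTEX_SPIN, IPL_HIGH);

	mutex_spin_enter(&slock_a);	/* ci_mtx_count 0 -> -1; old SPL saved */
	mutex_spin_enter(&slock_b);	/* ci_mtx_count -1 -> -2; SPL only raised */
	/* ... work at the higher of the two IPLs ... */
	mutex_spin_exit(&slock_b);	/* ci_mtx_count -2 -> -1; SPL unchanged */
	mutex_spin_exit(&slock_a);	/* ci_mtx_count -1 -> 0; splx() to saved SPL */

	mutex_destroy(&slock_b);
	mutex_destroy(&slock_a);
}
#endif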

/*
 * For architectures that provide 'simple' mutexes: they supply a CAS
 * function that is either MP-safe or does not need to be MP-safe.
 * Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_OWNED(owner)					\
	(owner != 0)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)			\
do {								\
	(mtx)->mtx_id = (id);					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)			\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	(mtx)->mtx_id = (id);					\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
	(mtx)->mtx_id = -1;					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_GETID(mtx)	((mtx)->mtx_id)

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	MUTEX_GIVE(mtx);
	mtx->mtx_owner = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif /* __HAVE_SIMPLE_MUTEXES */

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);
static struct lwp *mutex_owner(wchan_t);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline)) __attribute ((noreturn))
#endif
void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	u_int id;

	memset(mtx, 0, sizeof(*mtx));

	if (type == MUTEX_DRIVER)
		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);

	switch (type) {
	case MUTEX_NODEBUG:
		KASSERT(ipl == IPL_NONE);
		id = LOCKDEBUG_ALLOC(mtx, NULL);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		KASSERT(ipl == IPL_NONE);
		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_SPIN:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
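
/*
 * Illustrative sketch (not part of the original source): typical use of
 * the mutex(9) interface set up by mutex_init() above.  The softc
 * structure and function names are hypothetical.
 */
#if 0
struct frob_softc {
	kmutex_t	sc_lock;	/* adaptive (sleep) mutex */
	int		sc_count;
};

static void
frob_attach(struct frob_softc *sc)
{

	/* Adaptive mutexes must be initialized with IPL_NONE. */
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
}

static void
frob_bump(struct frob_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_count++;
	mutex_exit(&sc->sc_lock);
}

static void
frob_detach(struct frob_softc *sc)
{

	/* The mutex must be unowned and have no waiters at this point. */
	mutex_destroy(&sc->sc_lock);
}
#endif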

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
	}

	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	return false immediately.  This is necessary to avoid deadlock
 *	against the big lock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 *
 *	XXX This should be optimised further to reduce potential cache line
 *	ping-ponging and skewing of the spin time while busy waiting.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		return ci->ci_biglock_wanted != l;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_curlwp == l) {
			*cip = ci;
			return ci->ci_biglock_wanted != l;
		}
	}

	*cip = NULL;
	return 0;
}
#endif

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that handles all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word, and do exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif /* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif /* !MULTIPROCESSOR */
#endif /* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
	if (panicstr == NULL) {
		simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
		LOCKDEBUG_BARRIER(NULL, 1);
#endif
	}
#endif

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *	CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 *	--------------------------	--------------------------
		 *	..				acquire cache line
		 *	..				test for waiters
		 *	acquire cache line	<-	lose cache line
		 *	lock cache line			..
		 *	verify mutex is held		..
		 *	set waiters			..
		 *	unlock cache line		..
		 *	lose cache line		->	acquire cache line
		 *	..				clear lock word, waiters
		 *	return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case interrupted
		 *   or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o cpu_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to
		 * sleep, as we might never be awoken.
		 */
		mb_read();
		if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
			turnstile_exit(mtx);
			continue;
		}
#endif /* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		turnstile_unblock();
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif /* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
#else
	return 1;
#endif
}
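
/*
 * Illustrative sketch (not part of the original source): mutex_owned()
 * is typically used in assertions that document locking requirements.
 * This reuses the hypothetical frob_softc from the earlier sketch.
 */
#if 0
static void
frob_update_locked(struct frob_softc *sc)
{

	KASSERT(mutex_owned(&sc->sc_lock));
	sc->sc_count++;
}
#endif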

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
static struct lwp *
mutex_owner(wchan_t obj)
{
	kmutex_t *mtx = (void *)(uintptr_t)obj;	/* discard qualifiers */

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
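
/*
 * Illustrative sketch (not part of the original source): the usual
 * non-blocking pattern built on mutex_tryenter().  The function name is
 * hypothetical and reuses the hypothetical frob_softc from above.
 */
#if 0
static int
frob_poll(struct frob_softc *sc)
{

	if (!mutex_tryenter(&sc->sc_lock))
		return EBUSY;		/* somebody else holds the lock */
	sc->sc_count++;
	mutex_exit(&sc->sc_lock);
	return 0;
}
#endif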

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif /* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word, and do exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif /* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else /* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif /* MULTIPROCESSOR */
}
#endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

/*
 * sched_lock_idle:
 *
 *	XXX Ugly hack for cpu_switch().
 */
void
sched_lock_idle(void)
{
#ifdef FULL
	kmutex_t *mtx = &sched_mutex;

	curcpu()->ci_mtx_count--;

	if (!__cpu_simple_lock_try(&mtx->mtx_lock)) {
		mutex_spin_retry(mtx);
		return;
	}

	MUTEX_LOCKED(mtx);
#else
	curcpu()->ci_mtx_count--;
#endif /* FULL */
}

/*
 * sched_unlock_idle:
 *
 *	XXX Ugly hack for cpu_switch().
 */
void
sched_unlock_idle(void)
{
#ifdef FULL
	kmutex_t *mtx = &sched_mutex;

	if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
		MUTEX_ABORT(mtx, "sched_mutex not locked");

	MUTEX_UNLOCKED(mtx);
	__cpu_simple_unlock(&mtx->mtx_lock);
#endif /* FULL */
	curcpu()->ci_mtx_count++;
}