/*	$NetBSD: kern_mutex.c,v 1.10 2007/03/09 14:08:26 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */

#include "opt_multiprocessor.h"

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.10 2007/03/09 14:08:26 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>

#include <dev/lockstat.h>

#include <machine/intr.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */
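
/*
 * Illustrative sketch (not part of the implementation): with none of
 * the debug options below defined, the spin mutex fast path reduces to
 * roughly the following, in terms of the SPL macros defined further
 * down in this file:
 *
 *	mutex_spin_enter(mtx)	->	MUTEX_SPIN_SPLRAISE(mtx)
 *	mutex_spin_exit(mtx)	->	MUTEX_SPIN_SPLRESTORE(mtx)
 */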

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),				\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __FUNCTION__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int x__cnt, s;						\
	x__cnt = x__ci->ci_mtx_count--;				\
	s = splraiseipl(mtx->mtx_ipl);				\
	if (x__cnt == 0)					\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == 0)			\
		splx(s);					\
} while (/* CONSTCOND */ 0)
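
/*
 * Example of the nesting scheme above (illustrative only): ci_mtx_count
 * is decremented on each spin mutex acquisition and incremented on each
 * release, so only the outermost release restores the saved SPL:
 *
 *	mutex_spin_enter(&a);	count  0 -> -1, SPL saved and raised
 *	mutex_spin_enter(&b);	count -1 -> -2, SPL raised only
 *	mutex_spin_exit(&b);	count -2 -> -1, SPL unchanged
 *	mutex_spin_exit(&a);	count -1 ->  0, splx() to the saved SPL
 */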

/*
 * For architectures that provide 'simple' mutexes: a CAS function
 * that is either MP-safe, or does not need to be MP-safe.  Adaptive
 * mutexes on these architectures do not require an additional
 * interlock.
 */
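
/*
 * For illustration, MUTEX_CAS() is assumed to behave like a standard
 * compare-and-swap; the real definition is machine-dependent.  A
 * hypothetical reference version, ignoring atomicity:
 *
 *	int
 *	MUTEX_CAS(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
 *	{
 *		if (*p != old)
 *			return 0;
 *		*p = new;	(performed atomically in reality)
 *		return 1;
 *	}
 */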

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_OWNED(owner)					\
	(owner != 0)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)			\
do {								\
	(mtx)->mtx_id = (id);					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)			\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	(mtx)->mtx_id = (id);					\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
	(mtx)->mtx_id = -1;					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_GETID(mtx)	((mtx)->mtx_id)

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	MUTEX_GIVE(mtx);
	mtx->mtx_owner = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);
static struct lwp *mutex_owner(wchan_t);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline)) __attribute ((noreturn))
#endif
void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	u_int id;

	memset(mtx, 0, sizeof(*mtx));

	if (type == MUTEX_DRIVER)
		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);

	switch (type) {
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		KASSERT(ipl == IPL_NONE);
		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_SPIN:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
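
/*
 * Example usage (illustrative only; 'sc_lock' is a hypothetical name):
 *
 *	kmutex_t sc_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);	(adaptive)
 *	mutex_enter(&sc_lock);
 *	... critical section ...
 *	mutex_exit(&sc_lock);
 *	mutex_destroy(&sc_lock);
 *
 * A spin mutex for use against interrupt handlers would instead be
 * initialized with MUTEX_SPIN and an appropriate IPL, e.g.
 * mutex_init(&sc_lock, MUTEX_SPIN, IPL_BIO).
 */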

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
	}

	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	return false immediately.  This is necessary to avoid deadlock
 *	against the big lock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 *
 *	XXX This should be optimised further to reduce potential cache line
 *	ping-ponging and skewing of the spin time while busy waiting.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		return ci->ci_biglock_wanted != l;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_curlwp == l) {
			*cip = ci;
			return ci->ci_biglock_wanted != l;
		}
	}

	*cip = NULL;
	return 0;
}
#endif

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else	/* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word and doing exponential
		 * backoff to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
	if (panicstr == NULL) {
		simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
		LOCKDEBUG_BARRIER(NULL, 1);
#endif
	}
#endif

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
		 * ----------------------------  ----------------------------
		 *             ..                    acquire cache line
		 *             ..                    test for waiters
		 *    acquire cache line    <-       lose cache line
		 *    lock cache line                       ..
		 *    verify mutex is held                  ..
		 *    set waiters                           ..
		 *    unlock cache line                     ..
		 *    lose cache line       ->       acquire cache line
		 *             ..                    clear lock word, waiters
		 *    return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case it is
		 *   interrupted or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No operations other than MUTEX_SET_WAITERS() and
		 *   release can modify a mutex with a non-zero owner
		 *   field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o cpu_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to sleep,
		 * as we might never be awoken.
		 */
		mb_read();
		if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
			turnstile_exit(mtx);
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		turnstile_unblock();
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false(panicstr != NULL) || __predict_false(cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
#else
	return 1;
#endif
}
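
/*
 * mutex_owned() is typically used in assertions, e.g. (illustrative
 * only; 'sc_lock' is a hypothetical mutex):
 *
 *	KASSERT(mutex_owned(&sc_lock));
 *
 * For adaptive mutexes it answers "does the current LWP hold this
 * mutex", not "is this mutex held by anyone".
 */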

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
static struct lwp *
mutex_owner(wchan_t obj)
{
	kmutex_t *mtx = (void *)(uintptr_t)obj; /* discard qualifiers */

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
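
/*
 * Typical mutex_tryenter() pattern (illustrative only; 'sc_lock' is a
 * hypothetical mutex):
 *
 *	if (mutex_tryenter(&sc_lock)) {
 *		... got the lock without blocking ...
 *		mutex_exit(&sc_lock);
 *	} else {
 *		... contended; defer the work or retry later ...
 *	}
 */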

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word and doing exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

/*
 * sched_lock_idle:
 *
 *	XXX Ugly hack for cpu_switch().
 */
void
sched_lock_idle(void)
{
#ifdef FULL
	kmutex_t *mtx = &sched_mutex;

	curcpu()->ci_mtx_count--;

	if (!__cpu_simple_lock_try(&mtx->mtx_lock)) {
		mutex_spin_retry(mtx);
		return;
	}

	MUTEX_LOCKED(mtx);
#else
	curcpu()->ci_mtx_count--;
#endif	/* FULL */
}

/*
 * sched_unlock_idle:
 *
 *	XXX Ugly hack for cpu_switch().
 */
void
sched_unlock_idle(void)
{
#ifdef FULL
	kmutex_t *mtx = &sched_mutex;

	if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
		MUTEX_ABORT(mtx, "sched_mutex not locked");

	MUTEX_UNLOCKED(mtx);
	__cpu_simple_unlock(&mtx->mtx_lock);
#endif	/* FULL */
	curcpu()->ci_mtx_count++;
}