/*	$NetBSD: kern_mutex.c,v 1.2 2007/02/09 21:55:30 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */

#include "opt_multiprocessor.h"

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.2 2007/02/09 21:55:30 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>

#include <dev/lockstat.h>

#include <machine/intr.h>
/*
 * When not running a debug kernel, spin mutexes are not much
 * more than a splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
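
/*
 * Illustrative sketch only (not part of the implementation): when FULL
 * is not defined, the spin mutex paths in this file reduce to roughly
 *
 *	mutex_spin_enter(mtx):	MUTEX_SPIN_SPLRAISE(mtx);
 *	mutex_spin_exit(mtx):	MUTEX_SPIN_SPLRESTORE(mtx);
 *
 * i.e. just the SPL raise/restore described above, with the lock word
 * itself left untouched.
 */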

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
	LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
	LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
	LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
	mutex_abort(mtx, __FUNCTION__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)		/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int x__cnt, s;						\
	x__cnt = x__ci->ci_mtx_count--;				\
	s = splraiseipl(mtx->mtx_ipl);				\
	if (x__cnt == 0)					\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == 0)			\
		splx(s);					\
} while (/* CONSTCOND */ 0)
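
/*
 * Note that the ci_mtx_count bookkeeping above allows spin mutexes to
 * nest: the counter starts at zero, is decremented on each acquisition
 * and incremented on each release; only the SPL in force before the
 * outermost acquisition is recorded, and splx() runs only once the
 * count returns to zero.  For example (illustrative only):
 *
 *	MUTEX_SPIN_SPLRAISE(a);		count -1, old SPL saved
 *	MUTEX_SPIN_SPLRAISE(b);		count -2
 *	MUTEX_SPIN_SPLRESTORE(b);	count -1
 *	MUTEX_SPIN_SPLRESTORE(a);	count 0, splx() to the saved SPL
 */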

/*
 * For architectures that provide 'simple' mutexes: a CAS function
 * that is either MP-safe, or does not need to be MP-safe.  Adaptive
 * mutexes on these architectures do not require an additional
 * interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_OWNED(owner)					\
	(owner != 0)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)			\
do {								\
	(mtx)->mtx_id = (id);					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)			\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	(mtx)->mtx_id = (id);					\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
	(mtx)->mtx_id = -1;					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_GETID(mtx)	((mtx)->mtx_id)

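/*
 * CAS-based lock word operations.  MUTEX_CAS() atomically compares and
 * swaps the owner field; MUTEX_RECEIVE() and MUTEX_GIVE() are assumed
 * here to be the machine-dependent memory barrier hooks that order the
 * lock word access against the critical section (acquire ordering after
 * a successful CAS, release ordering before the owner field is cleared).
 */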
static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
	MUTEX_RECEIVE();
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE();
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	MUTEX_GIVE();
	mtx->mtx_owner = 0;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/*
 * Where fast-path stubs are not available (or with LOCKDEBUG, which
 * disables them), patch in the C vector routines below via strong alias.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter, mutex_vector_enter);
__strong_alias(mutex_exit, mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter, mutex_vector_enter);
__strong_alias(mutex_spin_exit, mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.
 */
__attribute ((noinline)) __attribute ((noreturn)) void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasted
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	u_int id;

	memset(mtx, 0, sizeof(*mtx));

	if (type == MUTEX_DRIVER)
		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);

	switch (type) {
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		KASSERT(ipl == IPL_NONE);
		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_SPIN:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
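
/*
 * Illustrative sketch only: a typical caller initializes, uses and
 * destroys a mutex along these lines, where sc_lock is a hypothetical
 * example.
 *
 *	kmutex_t sc_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);	(adaptive)
 *	mutex_enter(&sc_lock);
 *	...critical section...
 *	mutex_exit(&sc_lock);
 *	mutex_destroy(&sc_lock);
 */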

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
	}

	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	return false immediately.  This is necessary to avoid deadlock
 *	against the big lock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	if ((ci = *cip) != NULL && ci->ci_curlwp == l) {
		mb_read();	/* XXXSMP Necessary? */
		return ci->ci_biglock_wanted != l;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_curlwp == l) {
			*cip = ci;
			mb_read();	/* XXXSMP Necessary? */
			return ci->ci_biglock_wanted != l;
		}
	}

	*cip = NULL;
	return 0;
}
#endif

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else	/* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word and doing exponential
		 * backoff to reduce cache line ping-ponging between
		 * CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
	if (panicstr == NULL) {
		simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
		LOCKDEBUG_BARRIER(NULL, 1);
#endif
	}
#endif

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
		 * ----------------------------    ----------------------------
		 *     ..                          acquire cache line
		 *     ..                          test for waiters
		 *     acquire cache line    <-    lose cache line
		 *     lock cache line             ..
		 *     verify mutex is held        ..
		 *     set waiters                 ..
		 *     unlock cache line           ..
		 *     lose cache line       ->    acquire cache line
		 *     ..                          clear lock word, waiters
		 *                                 return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case we are
		 *   interrupted or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o cpu_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to sleep,
		 * as we might never be awoken.
		 */
		mb_read();
		if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
			turnstile_exit(mtx);
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		turnstile_unblock();
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false(panicstr != NULL) || __predict_false(cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

/*
 * mutex_owned:
 *
 *	Return true if the current thread holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
#else
	return 1;
#endif
}
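
/*
 * Illustrative sketch only: mutex_owned() is typically used in
 * diagnostic assertions, e.g.
 *
 *	KASSERT(mutex_owned(&sc_lock));
 *
 * where sc_lock is a hypothetical mutex held by the caller's code.
 */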

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.
 */
struct lwp *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	MUTEX_WANTLOCK(mtx);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
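
/*
 * Illustrative sketch only: a caller that must not block can attempt
 * the lock and fall back to other work, along these lines (sc_lock is
 * a hypothetical mutex):
 *
 *	if (mutex_tryenter(&sc_lock)) {
 *		...do work that needs the lock, without blocking...
 *		mutex_exit(&sc_lock);
 *	}
 */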

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word and doing exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

/*
 * sched_lock_idle:
 *
 *	XXX Ugly hack for cpu_switch().
 */
void
sched_lock_idle(void)
{
#ifdef FULL
	kmutex_t *mtx = &sched_mutex;

	curcpu()->ci_mtx_count--;

	if (!__cpu_simple_lock_try(&mtx->mtx_lock)) {
		mutex_spin_retry(mtx);
		return;
	}

	MUTEX_LOCKED(mtx);
#else
	curcpu()->ci_mtx_count--;
#endif	/* FULL */
}

/*
 * sched_unlock_idle:
 *
 *	XXX Ugly hack for cpu_switch().
 */
void
sched_unlock_idle(void)
{
#ifdef FULL
	kmutex_t *mtx = &sched_mutex;

	if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
		MUTEX_ABORT(mtx, "sched_unlock_idle");

	MUTEX_UNLOCKED(mtx);
	__cpu_simple_unlock(&mtx->mtx_lock);
#endif	/* FULL */
	curcpu()->ci_mtx_count++;
}