/*	$NetBSD: kern_mutex.c,v 1.65 2017/05/01 21:35:25 pgoyette Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.65 2017/05/01 21:35:25 pgoyette Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/types.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
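
/*
 * In that non-FULL case, mutex_spin_enter()/mutex_spin_exit() reduce to
 * roughly the following sketch (modulo the nesting count kept in
 * ci_mtx_count; see MUTEX_SPIN_SPLRAISE/MUTEX_SPIN_SPLRESTORE below):
 *
 *	s = splraiseipl(MUTEX_SPIN_IPL(mtx));
 *	... critical section ...
 *	splx(s);
 */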

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_TESTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), -1)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(__func__, __LINE__, mtx, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Some architectures can't use __cpu_simple_lock as is so allow a way
 * for them to use an alternate definition.
 */
#ifndef MUTEX_SPINBIT_LOCK_INIT
#define	MUTEX_SPINBIT_LOCK_INIT(mtx)	__cpu_simple_lock_init(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCKED_P
#define	MUTEX_SPINBIT_LOCKED_P(mtx)	__SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_TRY
#define	MUTEX_SPINBIT_LOCK_TRY(mtx)	__cpu_simple_lock_try(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_UNLOCK
#define	MUTEX_SPINBIT_LOCK_UNLOCK(mtx)	__cpu_simple_unlock(&(mtx)->mtx_lock)
#endif

#ifndef MUTEX_INITIALIZE_SPIN_IPL
#define	MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl) \
	((mtx)->mtx_ipl = makeiplcookie((ipl)))
#endif

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)					\
do {									\
	struct cpu_info *x__ci;						\
	int x__cnt, s;							\
	s = splraiseipl(MUTEX_SPIN_IPL(mtx));				\
	x__ci = curcpu();						\
	x__cnt = x__ci->ci_mtx_count--;					\
	__insn_barrier();						\
	if (x__cnt == 0)						\
		x__ci->ci_mtx_oldspl = (s);				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int s = x__ci->ci_mtx_oldspl;					\
	__insn_barrier();						\
	if (++(x__ci->ci_mtx_count) == 0)				\
		splx(s);						\
} while (/* CONSTCOND */ 0)
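
/*
 * A worked example of the nesting logic above (a sketch, assuming
 * ci_mtx_count starts at zero on a CPU holding no spin mutexes):
 *
 *	mutex_spin_enter(a);	count  0 -> -1, old SPL saved
 *	mutex_spin_enter(b);	count -1 -> -2, SPL only raised further
 *	mutex_spin_exit(b);	count -2 -> -1, SPL left raised
 *	mutex_spin_exit(a);	count -1 ->  0, saved SPL restored
 *
 * The __insn_barrier() calls keep the compiler from reordering the
 * count/oldspl accesses around the SPL change.
 */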

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)			\
do {								\
	if (!dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)		\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	if (!dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;		\
	MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl));		\
	MUTEX_SPINBIT_LOCK_INIT((mtx));				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_NODEBUG) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	(n) |= (o) & MUTEX_BIT_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t oldown = 0;
	uintptr_t newown = curthread;

	MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(newown, oldown);
	rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t newown;

	MUTEX_GIVE(mtx);
	newown = 0;
	MUTEX_INHERITDEBUG(newown, mtx->mtx_owner);
	mtx->mtx_owner = newown;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */
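
/*
 * A note on ordering (an informal sketch, not a contract from the MD
 * code): MUTEX_RECEIVE() is expected to act as an acquire barrier after
 * a successful CAS, and MUTEX_GIVE() as a release barrier before the
 * owner field is overwritten, so that memory accesses made inside the
 * critical section cannot leak outside it on weakly ordered machines.
 */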

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

static void	mutex_abort(const char *, size_t, kmutex_t *, const char *);
static void	mutex_dump(volatile void *);

lockops_t mutex_spin_lockops = {
	"Mutex",
	LOCKOPS_SPIN,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	LOCKOPS_SLEEP,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	(void *)mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
void __noinline
mutex_abort(const char *func, size_t line, kmutex_t *mtx, const char *msg)
{

	LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
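
/*
 * Typical usage (a minimal sketch; "sc_lock" is a hypothetical field
 * and the right IPL depends on the caller's context):
 *
 *	kmutex_t sc_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&sc_lock);
 *	... manipulate data covered by sc_lock ...
 *	mutex_exit(&sc_lock);
 *	mutex_destroy(&sc_lock);
 *
 * With IPL_NONE (or a softint IPL), MUTEX_DEFAULT yields an adaptive
 * mutex; any other IPL yields a spin mutex, per the first switch in
 * mutex_init() above.
 */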

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

#ifdef MULTIPROCESSOR
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() why dereference of the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word, and use exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *		..		    acquire cache line
		 *		..		    test for waiters
		 *	acquire cache line    <-    lose cache line
		 *	 lock cache line		..
		 *     verify mutex is held		..
		 *	    set waiters			..
		 *	 unlock cache line		..
		 *	  lose cache line     ->    acquire cache line
		 *		..		 clear lock word, waiters
		 *	  return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case it is
		 *   interrupted or preempted.)
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go asleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		MUTEX_SPINBIT_LOCK_UNLOCK(mtx);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return MUTEX_SPINBIT_LOCKED_P(mtx);
#else
	return 1;
#endif
}
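
/*
 * mutex_owned() is intended for assertions rather than for locking
 * decisions, e.g. (a typical sketch, "sc_lock" being hypothetical):
 *
 *	KASSERT(mutex_owned(&sc_lock));
 *
 * Note that in the !FULL case the spin variant degenerates to
 * "return 1" above, so such an assertion proves nothing there.
 */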

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_ownable:
 *
 *	When compiled with DEBUG and LOCKDEBUG defined, ensure that
 *	the mutex is available.  We cannot use !mutex_owned() since
 *	that won't work correctly for spin mutexes.
 */
int
mutex_ownable(kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	MUTEX_TESTLOCK(mtx);
#endif
	return 1;
}
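
/*
 * mutex_ownable() always returns 1, so it is meant to sit inside an
 * assertion, e.g. (a sketch, "sc_lock" being hypothetical):
 *
 *	KASSERT(mutex_ownable(&sc_lock));
 *
 * The real checking happens inside MUTEX_TESTLOCK() when LOCKDEBUG is
 * defined; in other kernels the call compiles down to a constant.
 */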
852 1.65 pgoyette
853 1.65 pgoyette /*
854 1.2 ad * mutex_tryenter:
855 1.2 ad *
856 1.2 ad * Try to acquire the mutex; return non-zero if we did.
857 1.2 ad */
858 1.2 ad int
859 1.2 ad mutex_tryenter(kmutex_t *mtx)
860 1.2 ad {
861 1.2 ad uintptr_t curthread;
862 1.2 ad
863 1.2 ad /*
864 1.2 ad * Handle spin mutexes.
865 1.2 ad */
866 1.2 ad if (MUTEX_SPIN_P(mtx)) {
867 1.2 ad MUTEX_SPIN_SPLRAISE(mtx);
868 1.2 ad #ifdef FULL
869 1.60 matt if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
870 1.4 ad MUTEX_WANTLOCK(mtx);
871 1.2 ad MUTEX_LOCKED(mtx);
872 1.2 ad return 1;
873 1.2 ad }
874 1.2 ad MUTEX_SPIN_SPLRESTORE(mtx);
875 1.2 ad #else
876 1.4 ad MUTEX_WANTLOCK(mtx);
877 1.2 ad MUTEX_LOCKED(mtx);
878 1.2 ad return 1;
879 1.2 ad #endif
880 1.2 ad } else {
881 1.2 ad curthread = (uintptr_t)curlwp;
882 1.2 ad MUTEX_ASSERT(mtx, curthread != 0);
883 1.2 ad if (MUTEX_ACQUIRE(mtx, curthread)) {
884 1.4 ad MUTEX_WANTLOCK(mtx);
885 1.2 ad MUTEX_LOCKED(mtx);
886 1.2 ad MUTEX_DASSERT(mtx,
887 1.2 ad MUTEX_OWNER(mtx->mtx_owner) == curthread);
888 1.2 ad return 1;
889 1.2 ad }
890 1.2 ad }
891 1.2 ad
892 1.2 ad return 0;
893 1.2 ad }
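
/*
 * A typical non-blocking pattern built on mutex_tryenter() (a sketch,
 * "sc_lock" being hypothetical; useful when blocking on the lock here
 * could invert the usual locking order):
 *
 *	if (mutex_tryenter(&sc_lock)) {
 *		... got the lock, do the work ...
 *		mutex_exit(&sc_lock);
 *	} else {
 *		... back off and retry later ...
 *	}
 */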

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word, and use exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else /* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif /* MULTIPROCESSOR */
}
#endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */