/*	$NetBSD: kern_mutex.c,v 1.80 2019/11/29 19:44:59 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.80 2019/11/29 19:44:59 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

#define	MUTEX_PANIC_SKIP_SPIN		1
#define	MUTEX_PANIC_SKIP_ADAPTIVE	1

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
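
/*
 * For illustration only: without FULL, a spin mutex enter/exit pair
 * reduces to roughly the following sketch (modulo the per-CPU count
 * bookkeeping done by MUTEX_SPIN_SPLRAISE/SPLRESTORE below):
 *
 *	int s = splraiseipl(MUTEX_SPIN_IPL(mtx));	// block interrupts
 *	...critical section...
 *	splx(s);					// restore SPL
 */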

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_TESTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), -1)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(__func__, __LINE__, mtx, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (__predict_false(!(cond)))				\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (__predict_false(!(cond)))				\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Some architectures can't use __cpu_simple_lock as-is, so allow a way
 * for them to use an alternate definition.
 */
#ifndef MUTEX_SPINBIT_LOCK_INIT
#define	MUTEX_SPINBIT_LOCK_INIT(mtx)	__cpu_simple_lock_init(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCKED_P
#define	MUTEX_SPINBIT_LOCKED_P(mtx)	__SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_TRY
#define	MUTEX_SPINBIT_LOCK_TRY(mtx)	__cpu_simple_lock_try(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_UNLOCK
#define	MUTEX_SPINBIT_LOCK_UNLOCK(mtx)	__cpu_simple_unlock(&(mtx)->mtx_lock)
#endif

#ifndef MUTEX_INITIALIZE_SPIN_IPL
#define	MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl)	\
					((mtx)->mtx_ipl = makeiplcookie((ipl)))
#endif

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)					\
do {									\
	struct cpu_info *x__ci;						\
	int x__cnt, s;							\
	s = splraiseipl(MUTEX_SPIN_IPL(mtx));				\
	x__ci = curcpu();						\
	x__cnt = x__ci->ci_mtx_count--;					\
	__insn_barrier();						\
	if (x__cnt == 0)						\
		x__ci->ci_mtx_oldspl = (s);				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int s = x__ci->ci_mtx_oldspl;					\
	__insn_barrier();						\
	if (++(x__ci->ci_mtx_count) == 0)				\
		splx(s);						\
} while (/* CONSTCOND */ 0)
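
/*
 * The ci_mtx_count bookkeeping above makes nested spin mutexes restore
 * the saved SPL only on the outermost release.  An illustrative trace,
 * following the two macros above:
 *
 *	mutex_spin_enter(a);	// ci_mtx_count  0 -> -1, oldspl saved
 *	mutex_spin_enter(b);	// ci_mtx_count -1 -> -2
 *	mutex_spin_exit(b);	// ci_mtx_count -2 -> -1, SPL unchanged
 *	mutex_spin_exit(a);	// ci_mtx_count -1 ->  0, splx(oldspl)
 */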

/*
 * Memory barriers.
 */
#ifdef	__HAVE_ATOMIC_AS_MEMBAR
#define	MUTEX_MEMBAR_ENTER()
#define	MUTEX_MEMBAR_EXIT()
#else
#define	MUTEX_MEMBAR_ENTER()	membar_enter()
#define	MUTEX_MEMBAR_EXIT()	membar_exit()
#endif

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)						\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)						\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)				\
do {									\
	if (!dodebug)							\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)			\
do {									\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;				\
	if (!dodebug)							\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;			\
	MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl));			\
	MUTEX_SPINBIT_LOCK_INIT((mtx));					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)						\
do {									\
	(mtx)->mtx_owner = MUTEX_THREAD;				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)						\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)						\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_NODEBUG) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	(n) |= (o) & MUTEX_BIT_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t oldown = 0;
	uintptr_t newown = curthread;

	MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(newown, oldown);
	rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown);
	MUTEX_MEMBAR_ENTER();
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_MEMBAR_ENTER();
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t newown;

	MUTEX_MEMBAR_EXIT();
	newown = 0;
	MUTEX_INHERITDEBUG(newown, mtx->mtx_owner);
	mtx->mtx_owner = newown;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

static void	mutex_abort(const char *, size_t, const kmutex_t *,
    const char *);
static void	mutex_dump(const volatile void *, lockop_printer_t);

lockops_t mutex_spin_lockops = {
	.lo_name = "Mutex",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = mutex_dump,
};

lockops_t mutex_adaptive_lockops = {
	.lo_name = "Mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = mutex_dump,
};

syncobj_t mutex_syncobj = {
	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
	.sobj_unsleep	= turnstile_unsleep,
	.sobj_changepri	= turnstile_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= (void *)mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
static void
mutex_dump(const volatile void *cookie, lockop_printer_t pr)
{
	const volatile kmutex_t *mtx = cookie;

	pr("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
static void __noinline
mutex_abort(const char *func, size_t line, const kmutex_t *mtx, const char *msg)
{

	LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void	_mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t);
void
_mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl,
    uintptr_t return_address)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL, return_address);
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    return_address);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    return_address);
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	_mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0));
}
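
/*
 * Typical usage, for illustration only (sc_lock is a hypothetical
 * caller-owned lock; see mutex(9)):
 *
 *	kmutex_t sc_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&sc_lock);
 *	...protected section...
 *	mutex_exit(&sc_lock);
 *	mutex_destroy(&sc_lock);
 *
 * Per the IPL tests above, MUTEX_DEFAULT with IPL_NONE or a software
 * interrupt IPL yields an adaptive mutex; a hardware interrupt IPL
 * (e.g. IPL_VM) yields a spin mutex.
 */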

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

#ifdef MULTIPROCESSOR
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() for why dereferencing the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin testing the lock word and do exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
#if MUTEX_PANIC_SKIP_SPIN
			if (panicstr != NULL)
				break;
#endif
			while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, !cpu_intr_p());
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		KDASSERT(pserialize_not_in_read_section());
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}
#if MUTEX_PANIC_SKIP_ADAPTIVE
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
#endif
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *		..		acquire cache line
		 *		..		test for waiters
		 *	acquire cache line  <-	lose cache line
		 *	lock cache line		..
		 *	verify mutex is held	..
		 *	set waiters		..
		 *	unlock cache line	..
		 *	lose cache line     ->	acquire cache line
		 *		..		clear lock word, waiters
		 *	return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. if interrupted
		 *   or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to sleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
#if MUTEX_PANIC_SKIP_SPIN
			if (panicstr != NULL)
				return;
#endif
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		MUTEX_SPINBIT_LOCK_UNLOCK(mtx);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

#if MUTEX_PANIC_SKIP_ADAPTIVE
	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}
#endif

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(const kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return MUTEX_SPINBIT_LOCKED_P(mtx);
#else
	return 1;
#endif
}
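
/*
 * mutex_owned() is typically used in assertions that a lock is held
 * at a given point, e.g. (illustrative sketch, hypothetical sc_lock):
 *
 *	KASSERT(mutex_owned(&sc_lock));
 */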

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(const kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_ownable:
 *
 *	When compiled with DEBUG and LOCKDEBUG defined, ensure that
 *	the mutex is available.  We cannot use !mutex_owned() since
 *	that won't work correctly for spin mutexes.
 */
int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	MUTEX_TESTLOCK(mtx);
#endif
	return 1;
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
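
/*
 * For example (illustrative sketch, hypothetical sc_lock), a caller
 * that must not block can back off instead of sleeping:
 *
 *	if (!mutex_tryenter(&sc_lock))
 *		return EBUSY;
 *	...protected section...
 *	mutex_exit(&sc_lock);
 */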

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin testing the lock word and do exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
#if MUTEX_PANIC_SKIP_SPIN
		if (panicstr != NULL)
			break;
#endif
		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */