/*	$NetBSD: kern_mutex.c,v 1.15 2007/07/09 21:10:53 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *        Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *        Richard McDougall.
 */

#include "opt_multiprocessor.h"

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.15 2007/07/09 21:10:53 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>

#include <dev/lockstat.h>

#include <machine/intr.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */
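
/*
 * A minimal sketch of that equivalence, assuming a hypothetical spin
 * mutex "mtx" in a kernel built without DIAGNOSTIC, MULTIPROCESSOR or
 * LOCKDEBUG (illustrative only, not part of the implementation):
 *
 *        int s;
 *
 *        s = splraiseipl(mtx->mtx_ipl);        ... mutex_spin_enter(mtx)
 *        ... critical section ...
 *        splx(s);                              ... mutex_spin_exit(mtx)
 */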

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),				\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __FUNCTION__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */
#ifndef MUTEX_COUNT_BIAS
#define	MUTEX_COUNT_BIAS	0
#endif

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int x__cnt, s;						\
	x__cnt = x__ci->ci_mtx_count--;				\
	s = splraiseipl(mtx->mtx_ipl);				\
	if (x__cnt == MUTEX_COUNT_BIAS)				\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)	\
		splx(s);					\
} while (/* CONSTCOND */ 0)
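
/*
 * Worked example (illustrative only; the spin mutexes "a" and "b" are
 * hypothetical): with MUTEX_COUNT_BIAS of 0, ci_mtx_count tracks how
 * many spin mutexes the current CPU holds, so only the outermost
 * acquisition saves the old SPL and only the final release restores it:
 *
 *        mutex_spin_enter(&a);   count 0 -> -1, old SPL saved, SPL raised
 *        mutex_spin_enter(&b);   count -1 -> -2, SPL raised if needed
 *        mutex_spin_exit(&b);    count -2 -> -1, SPL left raised
 *        mutex_spin_exit(&a);    count -1 -> 0, splx() to the saved SPL
 */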

/*
 * Support for architectures that provide 'simple' mutexes: a CAS
 * function that is either MP-safe, or does not need to be MP-safe.
 * Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_OWNED(owner)					\
	(owner != 0)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)			\
do {								\
	(mtx)->mtx_id = (id);					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)			\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	(mtx)->mtx_id = (id);					\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
	(mtx)->mtx_id = -1;					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)					\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)					\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_GETID(mtx)	((mtx)->mtx_id)

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	MUTEX_GIVE(mtx);
	mtx->mtx_owner = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif	/* __HAVE_SIMPLE_MUTEXES */
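
/*
 * A rough sketch of the semantics expected of MUTEX_CAS() above,
 * assuming it behaves as an atomic compare-and-swap on the owner word
 * (conceptual only; the real operation must execute atomically):
 *
 *        int
 *        MUTEX_CAS(volatile uintptr_t *ptr, uintptr_t old, uintptr_t new)
 *        {
 *                if (*ptr != old)
 *                        return 0;       ... lost the race
 *                *ptr = new;
 *                return 1;               ... owner word was updated
 *        }
 */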

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);
static struct lwp *mutex_owner(wchan_t);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline)) __attribute ((noreturn))
#endif
void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	u_int id;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DRIVER:
		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		id = LOCKDEBUG_ALLOC(mtx, NULL);
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_SPIN:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
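
/*
 * Typical usage, as a minimal sketch (the softc structure and its
 * fields are hypothetical, not part of this file):
 *
 *        struct example_softc {
 *                kmutex_t        sc_lock;
 *                int             sc_count;
 *        } *sc;
 *
 *        mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *        mutex_enter(&sc->sc_lock);
 *        sc->sc_count++;
 *        mutex_exit(&sc->sc_lock);
 *        mutex_destroy(&sc->sc_lock);
 */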

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
	}

	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	/* See if the target is running on a CPU somewhere. */
	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		goto run;
	for (CPU_INFO_FOREACH(cii, ci))
		if (ci->ci_curlwp == l)
			goto run;

	/* No: it may be safe to block now. */
	*cip = NULL;
	return 0;

 run:
 	/* Target is running; do we need to block? */
 	*cip = ci;
	return ci->ci_biglock_wanted != l;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else	/* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word and doing exponential
		 * backoff to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
	if (panicstr == NULL) {
		simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
		LOCKDEBUG_BARRIER(NULL, 1);
#endif
	}
#endif

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *              ..                  acquire cache line
		 *              ..                  test for waiters
		 *      acquire cache line    <-    lose cache line
		 *      lock cache line             ..
		 *      verify mutex is held        ..
		 *      set waiters                 ..
		 *      unlock cache line           ..
		 *      lose cache line       ->    acquire cache line
		 *              ..                  clear lock word, waiters
		 *      return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case it is
		 *   interrupted or preempted.)
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to
		 * sleep, as we might never be awoken.
		 */
		if ((mb_read(), mutex_onproc(owner, &ci)) ||
		    (mb_read(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
#else
	return 1;
#endif
}
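
/*
 * A common idiom, as a sketch only ("sc_lock" is hypothetical), is to
 * assert that a lock is held on entry to a routine:
 *
 *        KASSERT(mutex_owned(&sc->sc_lock));
 */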

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
static struct lwp *
mutex_owner(wchan_t obj)
{
	kmutex_t *mtx = (void *)(uintptr_t)obj;	/* discard qualifiers */

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
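
/*
 * A minimal sketch of a caller that must not block (the names are
 * hypothetical): on failure the work is deferred rather than waiting
 * for the lock:
 *
 *        if (!mutex_tryenter(&sc->sc_lock)) {
 *                ... busy: arrange to retry later ...
 *                return;
 *        }
 *        sc->sc_count++;
 *        mutex_exit(&sc->sc_lock);
 */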

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word and doing exponential
	 * backoff to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */