/*	$NetBSD: kern_lock.c,v 1.51.2.2 2001/06/21 20:06:50 nathanw Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * note that stdarg.h and the ansi style va_start macro are used for both
 * ansi and traditional c compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup_one((void *)(lkp));				\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

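#if 0
/*
 * Illustrative sketch only (kept out of the build by "#if 0"): the minimal
 * life cycle of a sleep lock using lockinit() and lockmgr().  The lock
 * variable, function name, and the PRIBIO/"exmplk" choices are hypothetical;
 * a real caller picks a priority, wait message, and timeout appropriate to
 * its subsystem.
 */
static struct lock example_lk;

static void
example_sleep_lock_usage(void)
{

	/* One-time setup: a sleep lock with no special flags. */
	lockinit(&example_lk, PRIBIO, "exmplk", 0, 0);

	/* Take and drop an exclusive lock; no caller-held interlock. */
	(void) lockmgr(&example_lk, LK_EXCLUSIVE, NULL);
	(void) lockmgr(&example_lk, LK_RELEASE, NULL);
}
#endif	/* 0 (example) */
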
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

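#if 0
/*
 * Illustrative sketch only: lockstatus() is convenient for assertions in
 * code that expects to be called with the lock already held exclusively.
 * The function and parameter names are hypothetical.
 */
static void
example_assert_exclusive(struct lock *lkp)
{

	KASSERT(lockstatus(lkp) == LK_EXCLUSIVE);
}
#endif	/* 0 (example) */
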
#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */

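#if 0
/*
 * Illustrative sketch only: the spl-before-lock pattern described above,
 * for a simple_lock that is also taken from a (hypothetical) network
 * interrupt handler.  "example_intr_slock" and the splnet() level are
 * assumptions for the sake of the example; the real rule is to block the
 * highest IPL at which the lock can be taken.
 */
static struct simplelock example_intr_slock = SIMPLELOCK_INITIALIZER;

static void
example_spl_then_lock(void)
{
	int s;

	s = splnet();				/* block the interrupt first... */
	simple_lock(&example_intr_slock);	/* ...then spin for the lock */
	/* critical section shared with the interrupt handler */
	simple_unlock(&example_intr_slock);
	splx(s);
}
#endif	/* 0 (example) */
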
/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curproc;
	struct proc *p = (l == NULL) ? NULL : l->l_proc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
#ifdef DIAGNOSTIC
				panic("lockmgr: no context");
#endif
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup_one((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

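#if 0
/*
 * Illustrative sketch only: two common lockmgr() calling patterns built on
 * the routine above.  "example_lk" and "example_slock" are hypothetical
 * objects; error handling is reduced to returning the lockmgr() result.
 */
static int
example_poll_then_wait(struct lock *example_lk, struct simplelock *example_slock)
{
	int error;

	/* Non-blocking attempt: EBUSY means we would have had to sleep. */
	error = lockmgr(example_lk, LK_SHARED | LK_NOWAIT, NULL);
	if (error == 0)
		return (lockmgr(example_lk, LK_RELEASE, NULL));

	/*
	 * Interlock hand-off: we hold example_slock while deciding to
	 * lock; LK_INTERLOCK tells lockmgr() to drop it for us once the
	 * lock's own interlock is held, closing the race window.
	 */
	simple_lock(example_slock);
	error = lockmgr(example_lk, LK_EXCLUSIVE | LK_INTERLOCK, example_slock);
	if (error == 0)
		error = lockmgr(example_lk, LK_RELEASE, NULL);
	return (error);
}
#endif	/* 0 (example) */
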
/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * re-acquire the lock N times, restoring the recursion count saved by
 * spinlock_release_all().
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock\n", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}

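#if 0
/*
 * Illustrative sketch only: the intended pairing of the two routines above
 * around a context switch.  "example_spinlk" is a hypothetical recursive
 * LK_SPIN lock; in the real kernel the equivalent calls belong in
 * mi_switch(), not in driver code.
 */
static void
example_switch_pattern(__volatile struct lock *example_spinlk)
{
	int held;

	/* Drop all recursive holds, remembering how many we had... */
	held = spinlock_release_all(example_spinlk);

	/* ... context switch happens here ... */

	/* ...and re-acquire the same number after resuming. */
	spinlock_acquire_count(example_spinlk, held);
}
#endif	/* 0 (example) */
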
/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

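#if 0
/*
 * Illustrative sketch only: typical use of the simple_lock interface.
 * Under LOCKDEBUG the simple_lock()/simple_unlock() macros expand to the
 * _simple_lock()/_simple_unlock() routines below with __FILE__ and __LINE__
 * filled in, which is where the diagnostics above get their file:line
 * information.  "example_sc_slock" is a hypothetical per-driver lock.
 */
static struct simplelock example_sc_slock;

static void
example_simple_lock_usage(void)
{

	simple_lock_init(&example_sc_slock);

	simple_lock(&example_sc_slock);
	/* short, non-sleeping critical section */
	simple_unlock(&example_sc_slock);
}
#endif	/* 0 (example) */
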
void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
#ifdef DDB
		db_stack_trace_print((db_expr_t)__builtin_frame_address(0),
		    TRUE, 65535, "", printf);
#endif
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */