/*	$NetBSD: kern_lock.c,v 1.128.2.4 2007/12/27 01:41:29 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.128.2.4 2007/12/27 01:41:29 ad Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>

#include <machine/stdarg.h>

#include <dev/lockstat.h>

/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define va_alist and va_dcl.
 */
void lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

static int acquire(struct lock **, int *, int, int, int, uintptr_t);

int lock_debug_syslog = 0;	/* defaults to printf, but can be patched */
bool kernel_lock_dodebug;
__cpu_simple_lock_t kernel_lock;

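/*
 * LOCKDEBUG operations descriptor for lockmgr locks; the dump handler
 * is a no-op.
 */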
static lockops_t lockmgr_lockops = {
	"lockmgr",
	1,
	(void *)nullop
};

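/*
 * COUNT() tracks the number of locks held by an LWP when LOCKDEBUG or
 * DIAGNOSTIC is enabled; otherwise it expands to nothing.
 */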
#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#define	COUNT(lkp, l, cpu_id, x)	(l)->l_locks += (x)
#else
#define COUNT(lkp, p, cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

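/*
 * RETURN_ADDRESS is the caller's return address, recorded for the
 * LOCKDEBUG and lockstat bookkeeping below.
 */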
#define	RETURN_ADDRESS	((uintptr_t)__builtin_return_address(0))

/*
 * Acquire a resource: sleep until none of the flags in 'wanted' remain
 * set on the lock, or until the sleep fails (timeout, signal, or
 * LK_SLEEPFAIL).
 */
static int
acquire(struct lock **lkpp, int *s, int extflags,
    int drain, int wanted, uintptr_t ra)
{
	int error;
	struct lock *lkp = *lkpp;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	LOCKSTAT_ENTER(lsflag);

	for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
		if (drain)
			lkp->lk_flags |= LK_WAITDRAIN;
		else {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		LOCKSTAT_START_TIMER(lsflag, slptime);
		error = mtsleep(drain ? (void *)&lkp->lk_flags : (void *)lkp,
		    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo,
		    __UNVOLATILE(&lkp->lk_interlock));
		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_EVENT_RA(lsflag, (void *)(uintptr_t)lkp,
		    LB_LOCKMGR | LB_SLEEP1, 1, slptime, ra);
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
	}

	LOCKSTAT_EXIT(lsflag);

	return error;
}

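/*
 * SETHOLDER() records the owner of an exclusive lock, WEHOLDIT() tests
 * whether the given process/LWP is that owner, and WAKEUP_WAITER()
 * wakes any threads sleeping in acquire().
 */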
#define	SETHOLDER(lkp, pid, lid, cpu_id) \
do { \
	(lkp)->lk_lockholder = pid; \
	(lkp)->lk_locklwp = lid; \
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id) \
	((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid))

#define	WAKEUP_WAITER(lkp) \
do { \
	if (((lkp)->lk_flags & LK_WAIT_NONZERO) != 0) { \
		wakeup((lkp)); \
	} \
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

static void
lockpanic(struct lock *lkp, const char *fmt, ...)
{
	char s[150], b[150];
	static const char *locktype[] = {
		"*0*", "shared", "exclusive", "*3*", "*4*", "downgrade",
		"*release*", "drain", "exclother", "*9*", "*10*",
		"*11*", "*12*", "*13*", "*14*", "*15*"
	};
	va_list ap;
	va_start(ap, fmt);
	vsnprintf(s, sizeof(s), fmt, ap);
	va_end(ap);
	bitmask_snprintf(lkp->lk_flags, __LK_FLAG_BITS, b, sizeof(b));
	panic("%s ("
	    "type %s flags %s, sharecount %d, exclusivecount %d, "
	    "recurselevel %d, waitcount %d, wmesg %s"
	    ", lock_addr %p, unlock_addr %p"
	    ")\n",
	    s, locktype[lkp->lk_flags & LK_TYPE_MASK],
	    b, lkp->lk_sharecount, lkp->lk_exclusivecount,
	    lkp->lk_recurselevel, lkp->lk_waitcount, lkp->lk_wmesg,
	    (void *)lkp->lk_lock_addr, (void *)lkp->lk_unlock_addr
	    );
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, pri_t prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	mutex_init(&lkp->lk_interlock, MUTEX_DEFAULT, IPL_NONE);
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lock_addr = 0;
	lkp->lk_unlock_addr = 0;

	if (LOCKDEBUG_ALLOC(lkp, &lockmgr_lockops,
	    (uintptr_t)__builtin_return_address(0))) {
		lkp->lk_flags |= LK_DODEBUG;
	}
}

void
lockdestroy(struct lock *lkp)
{

	LOCKDEBUG_FREE(((lkp->lk_flags & LK_DODEBUG) != 0), lkp);
	mutex_destroy(&lkp->lk_interlock);
}
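
/*
 * Illustrative usage sketch (the lock variable, priority and wait
 * message below are made up for the example):
 *
 *	struct lock examplelock;
 *
 *	lockinit(&examplelock, PWAIT, "example", 0, 0);
 *	if (lockmgr(&examplelock, LK_EXCLUSIVE, NULL) == 0) {
 *		... critical section ...
 *		lockmgr(&examplelock, LK_RELEASE, NULL);
 *	}
 *	lockdestroy(&examplelock);
 */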

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int lock_type = 0;
	struct lwp *l = curlwp; /* XXX */
	pid_t pid;
	lwpid_t lid;
	cpuid_t cpu_num;

	if (l == NULL) {
		cpu_num = cpu_number();
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		cpu_num = LK_NOCPU;
		pid = l->l_proc->p_pid;
		lid = l->l_lid;
	}

	mutex_enter(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	else if (lkp->lk_flags & LK_WANT_EXCL)
		lock_type = LK_EXCLOTHER;
	mutex_exit(&lkp->lk_interlock);
	return (lock_type);
}
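
/*
 * Callers typically use lockstatus() in assertions, for example
 * KASSERT(lockstatus(lkp) == LK_EXCLUSIVE).
 */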

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks to go away.
 */
int
lockmgr(struct lock *lkp, u_int flags, kmutex_t *interlkp)
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_num;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	/* LK_RETRY is for vn_lock, not for lockmgr. */
	KASSERT((flags & LK_RETRY) == 0);
	KASSERT((l->l_pflag & LP_INTR) == 0 || panicstr != NULL);

	mutex_enter(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		mutex_exit(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	if (l == NULL) {
		if (!doing_shutdown) {
			panic("lockmgr: no context");
		} else {
			l = &lwp0;
			if (panicstr && (!(flags & LK_NOWAIT))) {
				flags |= LK_NOWAIT;
				lock_shutdown_noblock = 1;
			}
		}
	}
	lid = l->l_lid;
	pid = l->l_proc->p_pid;
	cpu_num = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			lockpanic(lkp, "lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
			lockpanic(lkp, "lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks to clear.
			 */
			error = acquire(&lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL,
			    RETURN_ADDRESS);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
		    lkp->lk_exclusivecount == 0)
			lockpanic(lkp, "lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_addr = RETURN_ADDRESS;
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					lockpanic(lkp, "lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks to finish.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_SHARE_NONZERO,
		    RETURN_ADDRESS);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			WAKEUP_WAITER(lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_addr = RETURN_ADDRESS;
#endif
		if (lkp->lk_exclusivecount != 0)
			lockpanic(lkp, "lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
				lockpanic(lkp, "lockmgr: pid %d.%d, not "
				    "exclusive lock holder %d.%d "
				    "unlocking", pid, lid,
				    lkp->lk_lockholder,
				    lkp->lk_locklwp);
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_num, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_addr = RETURN_ADDRESS;
#endif
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
		}
#ifdef DIAGNOSTIC
		else
			lockpanic(lkp, "lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lockpanic(lkp, "lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(&lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO,
		    RETURN_ADDRESS);
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		if ((extflags & LK_RESURRECT) == 0)
			lkp->lk_flags |= LK_DRAINING;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_addr = RETURN_ADDRESS;
#endif
		lkp->lk_exclusivecount = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	default:
		mutex_exit(&lkp->lk_interlock);
		lockpanic(lkp, "lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup(&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		lockpanic(lkp, "lockmgr: deadlock (see previous panic)");

	mutex_exit(&lkp->lk_interlock);
	return (error);
}
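
/*
 * Illustrative drain sketch: a lock is commonly drained before the
 * structure containing it is torn down, e.g.
 *
 *	(void)lockmgr(&examplelock, LK_DRAIN, NULL);
 *	lockdestroy(&examplelock);
 */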

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		printf("pid %d.%d", lkp->lk_lockholder,
		    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG)
void
assert_sleepable(struct simplelock *interlock, const char *msg)
{

	if (panicstr != NULL)
		return;
	LOCKDEBUG_BARRIER(&kernel_lock, 1);
	if (CURCPU_IDLE_P() && !cold) {
		panic("assert_sleepable: idle");
	}
}
#endif

/*
 * rump doesn't need the kernel lock, so force it out.  We cannot
 * currently easily include it for compilation because of
 * a) SPINLOCK_* and b) membar_producer().  They are defined in
 * different places and in different ways for each arch, so do not
 * bother to fight a lot for no gain (i.e. pain but still no gain).
 */
#ifndef _RUMPKERNEL
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg) \
    LOCKDEBUG_ABORT(&kernel_lock, &_kernel_lock_ops, __func__, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond) \
do { \
	if (!(cond)) \
		_KERNEL_LOCK_ABORT("assertion failed: " #cond); \
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

void _kernel_lock_dump(volatile void *);

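/*
 * LOCKDEBUG operations descriptor for the big kernel lock (a spin
 * lock); _kernel_lock_dump() is its dump handler.
 */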
lockops_t _kernel_lock_ops = {
	"Kernel lock",
	0,
	_kernel_lock_dump
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(&kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}

/*
 * Print debugging information about the kernel lock.
 */
void
_kernel_lock_dump(volatile void *junk)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.  If 'l' is non-null, the
 * acquisition is from process context.
 */
void
_kernel_lock(int nlocks, struct lwp *l)
{
	struct cpu_info *ci = curcpu();
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
#ifdef LOCKDEBUG
	u_int spins;
#endif
	int s;

	if (nlocks == 0)
		return;
	_KERNEL_LOCK_ASSERT(nlocks > 0);

	l = curlwp;

	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(&kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, &kernel_lock, RETURN_ADDRESS,
	    0);

	s = splvm();
	if (__cpu_simple_lock_try(&kernel_lock)) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, &kernel_lock,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	/*
	 * Before setting ci_biglock_wanted we must post a store
	 * fence (see kern_mutex.c).  This is accomplished by the
	 * __cpu_simple_lock_try() above.
	 */
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = curlwp;	/* XXXAD */

#ifdef LOCKDEBUG
	spins = 0;
#endif

	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(&kernel_lock)) {
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				_KERNEL_LOCK_ABORT("spinout");
#endif
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		(void)splvm();
	} while (!__cpu_simple_lock_try(&kernel_lock));

	ci->ci_biglock_wanted = owant;
	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, &kernel_lock, RETURN_ADDRESS, 0);
	splx(s);

	/*
	 * Again, another store fence is required (see kern_mutex.c).
	 */
	membar_producer();
	if (owant == NULL) {
		LOCKSTAT_EVENT(lsflag, &kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
		    1, spintime);
	}
	LOCKSTAT_EXIT(lsflag);
}
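
/*
 * Note that callers normally take and release the kernel lock through
 * the KERNEL_LOCK()/KERNEL_UNLOCK_*() wrappers rather than by calling
 * _kernel_lock() and _kernel_unlock() directly.
 */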

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
 * all holds.  If 'l' is non-null, the release is from process context.
 */
void
_kernel_unlock(int nlocks, struct lwp *l, int *countp)
{
	struct cpu_info *ci = curcpu();
	u_int olocks;
	int s;

	l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(&kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}

	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);

	l->l_blcnt -= nlocks;
	if (ci->ci_biglock_count == nlocks) {
		s = splvm();
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, &kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(&kernel_lock);
		splx(s);
	} else
		ci->ci_biglock_count -= nlocks;

	if (countp != NULL)
		*countp = olocks;
}
#endif /* !_RUMPKERNEL */