/*	$NetBSD: kern_lock.c,v 1.24 1999/08/10 21:10:20 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x) \
	/* atomic_add_ulong(&curcpu().ci_spin_locks, (x)) */
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) \
		COUNT_CPU((cpu_id), (x)); \
	else \
		(p)->p_locks += (x); \
} while (0)
#else
#define	COUNT(lkp, p, cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted) \
	if ((extflags) & LK_SPIN) { \
		int interlocked; \
 \
		if ((drain) == 0) \
			(lkp)->lk_waitcount++; \
		for (interlocked = 1;;) { \
			if (wanted) { \
				if (interlocked) { \
					simple_unlock(&(lkp)->lk_interlock); \
					interlocked = 0; \
				} \
			} else if (interlocked) { \
				break; \
			} else { \
				simple_lock(&(lkp)->lk_interlock); \
				interlocked = 1; \
			} \
		} \
		if ((drain) == 0) \
			(lkp)->lk_waitcount--; \
		KASSERT((wanted) == 0); \
		error = 0;	/* sanity */ \
	} else { \
		for (error = 0; wanted; ) { \
			if ((drain)) \
				(lkp)->lk_flags |= LK_WAITDRAIN; \
			else \
				(lkp)->lk_waitcount++; \
			simple_unlock(&(lkp)->lk_interlock); \
			/* XXX Cast away volatile. */ \
			error = tsleep((drain) ? &(lkp)->lk_flags : \
			    (void *)(lkp), (lkp)->lk_prio, \
			    (lkp)->lk_wmesg, (lkp)->lk_timo); \
			simple_lock(&(lkp)->lk_interlock); \
			if ((drain) == 0) \
				(lkp)->lk_waitcount--; \
			if (error) \
				break; \
			if ((extflags) & LK_SLEEPFAIL) { \
				error = ENOLCK; \
				break; \
			} \
		} \
	}

#define	SETHOLDER(lkp, pid, cpu_id) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) \
		(lkp)->lk_cpu = cpu_id; \
	else \
		(lkp)->lk_lockholder = pid; \
} while (0)

#define	WEHOLDIT(lkp, pid, cpu_id) \
	(((lkp)->lk_flags & LK_SPIN) != 0 ? \
	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp) \
do { \
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) { \
		/* XXX Cast away volatile. */ \
		wakeup_one((void *)(lkp)); \
	} \
} while (0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()	cpu_simple_lock(&spinlock_list_slock)

#define	SPINLOCK_LIST_UNLOCK()	cpu_simple_unlock(&spinlock_list_slock)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) { \
		int s = splhigh(); \
		SPINLOCK_LIST_LOCK(); \
		/* XXX Cast away volatile. */ \
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
		    lk_list); \
		SPINLOCK_LIST_UNLOCK(); \
		splx(s); \
	} \
} while (0)

#define	DONTHAVEIT(lkp) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) { \
		int s = splhigh(); \
		SPINLOCK_LIST_LOCK(); \
		/* XXX Cast away volatile. */ \
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \
		    lk_list); \
		SPINLOCK_LIST_UNLOCK(); \
		splx(s); \
	} \
} while (0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
}
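
/*
 * Illustrative sketch (added commentary, not code from the original file):
 * a subsystem that wants a sleep lock initializes it with a tsleep()
 * priority and a wait-channel message, while a spin lock is requested by
 * passing LK_SPIN, in which case prio and timo are unused.  The lock names
 * and the PZERO priority below are assumptions chosen only for the example.
 *
 *	struct lock example_sleep_lock, example_spin_lock;
 *
 *	lockinit(&example_sleep_lock, PZERO, "exslp", 0, 0);
 *	lockinit(&example_spin_lock, 0, "exspin", 0, LK_SPIN);
 */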

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
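
/*
 * Illustrative sketch (added commentary, not code from the original file):
 * lockstatus() tells a caller whether a lock is currently held shared,
 * held exclusively, or not held at all; example_sleep_lock is the
 * hypothetical lock from the sketch above.
 *
 *	switch (lockstatus(&example_sleep_lock)) {
 *	case LK_EXCLUSIVE:
 *		... some process holds it exclusively ...
 *		break;
 *	case LK_SHARED:
 *		... one or more shared holders ...
 *		break;
 *	default:
 *		... not held ...
 *		break;
 *	}
 */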

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;

	error = 0;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
#ifdef DIAGNOSTIC /* { */
		if (p == NULL)
			panic("lockmgr: no context");
#endif /* } */
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

#ifdef DIAGNOSTIC /* { */
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */ /* } */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 ||
		    lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup_one((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
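
/*
 * Illustrative sketch of typical callers (added commentary; the helper
 * functions below are hypothetical, not part of this file): readers take
 * and drop a shared lock, a writer takes an exclusive one, and a reader
 * that discovers it must modify the object can attempt a non-blocking
 * upgrade; if the upgrade fails, the shared lock has been released and a
 * fresh exclusive request is needed.
 *
 *	void
 *	example_read(struct lock *lkp)
 *	{
 *
 *		(void) lockmgr(lkp, LK_SHARED, NULL);
 *		... examine the protected data ...
 *		(void) lockmgr(lkp, LK_RELEASE, NULL);
 *	}
 *
 *	void
 *	example_modify(struct lock *lkp)
 *	{
 *
 *		(void) lockmgr(lkp, LK_SHARED, NULL);
 *		if (lockmgr(lkp, LK_UPGRADE | LK_NOWAIT, NULL) != 0) {
 *			... upgrade would block; shared lock is gone ...
 *			(void) lockmgr(lkp, LK_EXCLUSIVE, NULL);
 *		}
 *		... modify the protected data ...
 *		(void) lockmgr(lkp, LK_RELEASE, NULL);
 *	}
 */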

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	__volatile struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK() \
	cpu_simple_lock(&simplelock_list_slock)

#define	SLOCK_LIST_UNLOCK() \
	cpu_simple_unlock(&simplelock_list_slock)

#define	SLOCK_COUNT(x) \
	/* atomic_add_ulong(&curcpu()->ci_simple_locks, (x)) */
#else
u_long	simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
int	simple_lock_debugger = 0;
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#endif /* } */

#define	SLOCK_WHERE(str, alp, id, l) \
do { \
	printf(str); \
	printf("currently at: %s:%d\n", (id), (l)); \
	if ((alp)->lock_file != NULL) \
		printf("last locked: %s:%d\n", (alp)->lock_file, \
		    (alp)->lock_line); \
	if ((alp)->unlock_file != NULL) \
		printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line); \
	SLOCK_DEBUGGER(); \
} while (0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

#if defined(MULTIPROCESSOR) /* { */
	cpu_simple_lock_init(alp);
#else
	alp->lock_data = SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	cpu_simple_lock(alp);
#else
	alp->lock_data = SIMPLELOCK_LOCKED;
#endif /* } */

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = cpu_simple_lock_try(alp)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	/* Now that we've modified all fields, release the lock. */
	cpu_simple_unlock(alp);
#else
	alp->lock_data = SIMPLELOCK_UNLOCKED;
#endif /* } */

 out:
	splx(s);
}
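
/*
 * Illustrative sketch (added commentary, not code from this file): under
 * LOCKDEBUG the simple_lock()/simple_lock_try()/simple_unlock() interfaces
 * in <sys/lock.h> are expected to map to the checked _simple_lock(),
 * _simple_lock_try(), and _simple_unlock() routines above, supplying the
 * caller's file and line so SLOCK_WHERE() can report where a lock was last
 * taken or dropped.  A typical caller just brackets a short critical
 * section, optionally using the try variant when it cannot afford to spin;
 * example_slock is a hypothetical lock name.
 *
 *	struct simplelock example_slock = SIMPLELOCK_INITIALIZER;
 *
 *	simple_lock(&example_slock);
 *	... touch the data the lock protects ...
 *	simple_unlock(&example_slock);
 *
 *	if (simple_lock_try(&example_slock)) {
 *		... got it without spinning ...
 *		simple_unlock(&example_slock);
 *	}
 */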

void
simple_lock_dump()
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	SLOCK_LIST_LOCK();
	printf("all simple locks:\n");
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(start, end)
	void *start, *end;
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}
#endif /* LOCKDEBUG */ /* } */