/*	$NetBSD: kern_lock.c,v 1.20 1999/07/26 23:02:53 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>
/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
#define	COUNT(p, x)	if (p) (p)->p_locks += (x)
#else
#define	COUNT(p, x)
#endif

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, wanted)				\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
									\
		for (interlocked = 1;;) {				\
			if (wanted) {					\
				if (interlocked) {			\
					simple_unlock(&(lkp)->lk_interlock); \
					interlocked = 0;		\
				}					\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				simple_lock(&(lkp)->lk_interlock);	\
				interlocked = 1;			\
			}						\
		}							\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			(lkp)->lk_waitcount++;				\
			simple_unlock(&(lkp)->lk_interlock);		\
			error = tsleep((void *)lkp, (lkp)->lk_prio,	\
			    (lkp)->lk_wmesg, (lkp)->lk_timo);		\
			simple_lock(&(lkp)->lk_interlock);		\
			(lkp)->lk_waitcount--;				\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}
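
/*
 * Illustrative sketch (an editor's addition, not part of the original
 * file; the function name and the LOCK_EXAMPLE guard are hypothetical,
 * and the block is not compiled by default): ACQUIRE() is always
 * expanded with lk_interlock held, as in the lockmgr() cases below.
 */
#ifdef LOCK_EXAMPLE
static int
example_wait_for_excl(__volatile struct lock *lkp, int extflags)
{
	int error;

	/* Wait until no exclusive activity remains on the lock. */
	ACQUIRE(lkp, error, extflags, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
	return (error);
}
#endif /* LOCK_EXAMPLE */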

#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	    ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
}
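
/*
 * Example (an editor's sketch, not part of the original file; the names
 * and the LOCK_EXAMPLE guard are hypothetical): a sleep lock whose
 * waiters sleep at priority PWAIT with no timeout, and a spin lock,
 * for which prio and timo are ignored.
 */
#ifdef LOCK_EXAMPLE
static struct lock example_sleep_lock;
static struct lock example_spin_lock;

static void
example_lockinit(void)
{

	lockinit(&example_sleep_lock, PWAIT, "exsleep", 0, 0);
	lockinit(&example_spin_lock, 0, "exspin", 0, LK_SPIN);
}
#endif /* LOCK_EXAMPLE */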

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
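
/*
 * Example (an editor's sketch, not part of the original file):
 * lockstatus() returns LK_EXCLUSIVE, LK_SHARED, or 0 for an unheld
 * lock; note that it does not say whether *we* are the holder.
 */
#ifdef LOCK_EXAMPLE
static int
example_is_locked(struct lock *lkp)
{

	return (lockstatus(lkp) != 0);
}
#endif /* LOCK_EXAMPLE */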

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
{
	int error;
	pid_t pid;
	int extflags;
	u_long cpu_id;
	struct proc *p = curproc;

	error = 0;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("lockmgr: no context");
#endif
		pid = p->p_pid;
	}
	cpu_id = 0;	/* XXX cpu_number() XXX */

#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned.  The only valid operation thereafter
	 * is a single release of that exclusive lock.  This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag.  Any
	 * further requests of any sort will result in a panic.  The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
		if (lkp->lk_waitcount)
			wakeup_one((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup_one((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup_one((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		if (lkp->lk_flags & LK_SPIN) {
			ACQUIRE(lkp, error, extflags,
			    ((lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
			    lkp->lk_sharecount != 0 ||
			    lkp->lk_waitcount != 0));
		} else {
			/*
			 * This is just a special case of the sleep case
			 * in ACQUIRE().  We set LK_WAITDRAIN instead of
			 * incrementing waitcount.
			 */
			for (error = 0; ((lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
			    lkp->lk_sharecount != 0 ||
			    lkp->lk_waitcount != 0); ) {
				lkp->lk_flags |= LK_WAITDRAIN;
				simple_unlock(&lkp->lk_interlock);
				if ((error = tsleep((void *)&lkp->lk_flags,
				    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)))
					return (error);
				if ((extflags) & LK_SLEEPFAIL)
					return (ENOLCK);
				simple_lock(&lkp->lk_interlock);
			}
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup_one((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
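
/*
 * Usage sketch (an editor's addition, not part of the original file;
 * the function and the LOCK_EXAMPLE guard are hypothetical): a shared
 * acquire/release pair, then a non-blocking exclusive attempt.  The
 * NULL interlock is fine because LK_INTERLOCK is not passed.
 */
#ifdef LOCK_EXAMPLE
static int
example_lockmgr(struct lock *lkp)
{
	int error;

	/* Acquire a shared (read) lock; this may sleep. */
	error = lockmgr(lkp, LK_SHARED, NULL);
	if (error)
		return (error);
	/* ... read-only access to the protected object ... */
	(void) lockmgr(lkp, LK_RELEASE, NULL);

	/* Poll for an exclusive lock; EBUSY if it would have slept. */
	error = lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
	if (error == 0)
		(void) lockmgr(lkp, LK_RELEASE, NULL);
	return (error);
}
#endif /* LOCK_EXAMPLE */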

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	__volatile struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) && !defined(MULTIPROCESSOR)
LIST_HEAD(slocklist, simplelock) slockdebuglist;
int simple_lock_debugger = 0;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{
	alp->lock_data = 0;
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	s = splhigh();
	if (alp->lock_data == 1) {
		printf("simple_lock: lock held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (simple_lock_debugger) {
			Debugger();
		}
		splx(s);
		return;
	}
	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;
	if (curproc)
		curproc->p_simple_locks++;
	splx(s);
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	s = splhigh();
	if (alp->lock_data != 0) {
		printf("simple_lock_try: lock held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (simple_lock_debugger) {
			Debugger();
		}
		splx(s);
		return (0);
	}

	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;
	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
	if (curproc)
		curproc->p_simple_locks++;
	splx(s);

	return (1);
}

void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	s = splhigh();
	if (alp->lock_data == 0) {
		printf("simple_unlock: lock not held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		    alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		    alp->unlock_file, alp->unlock_line);
		if (simple_lock_debugger) {
			Debugger();
		}
		splx(s);
		return;
	}

	LIST_REMOVE(alp, list);
	alp->list.le_next = NULL;
	alp->list.le_prev = NULL;
	alp->lock_data = 0;
	alp->unlock_file = id;
	alp->unlock_line = l;
	if (curproc)
		curproc->p_simple_locks--;
	splx(s);
}
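
/*
 * Usage sketch (an editor's addition, not part of the original file;
 * the function and the LOCK_EXAMPLE guard are hypothetical): under
 * LOCKDEBUG, <sys/lock.h> maps simple_lock() and simple_unlock() onto
 * the _simple_lock()/_simple_unlock() functions above, passing
 * __FILE__ and __LINE__ so the debug output can name the caller.
 */
#ifdef LOCK_EXAMPLE
static struct simplelock example_slock;

static void
example_simple_lock(void)
{

	simple_lock_init(&example_slock);
	simple_lock(&example_slock);	/* records __FILE__:__LINE__ */
	/* ... short critical section ... */
	simple_unlock(&example_slock);
}
#endif /* LOCK_EXAMPLE */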

void
simple_lock_dump()
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	printf("all simple locks:\n");
	for (alp = LIST_FIRST(&slockdebuglist);
	     alp != NULL;
	     alp = LIST_NEXT(alp, list)) {
		printf("%p %s:%d\n", alp, alp->lock_file, alp->lock_line);
	}
	splx(s);
}

void
simple_lock_freecheck(start, end)
	void *start, *end;
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	for (alp = LIST_FIRST(&slockdebuglist);
	     alp != NULL;
	     alp = LIST_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			printf("freeing simple_lock %p %s:%d\n",
			    alp, alp->lock_file, alp->lock_line);
#ifdef DDB
			Debugger();
#endif
		}
	}
	splx(s);
}
#endif /* LOCKDEBUG && ! MULTIPROCESSOR */