/*	$NetBSD: vfs_lockf.c,v 1.17 2000/07/22 15:26:13 jdolecek Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF	(struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
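/* SELF and OTHERS select whose locks lf_findoverlap() considers. */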

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
 * interlock should be sufficient; (b) requires a change to the API
 * because the vnode isn't visible here.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */
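
/*
 * Lock ranges are kept internally as a closed interval [lf_start, lf_end],
 * where lf_end == -1 means the lock extends to the end of the file.
 */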

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args *ap;
	struct lockf **head;
	off_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

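	/*
	 * For SEEK_END, "size" is the current size of the file, as
	 * passed in by the file system that called lf_advlock().
	 */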
	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

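	/*
	 * A length of zero means the lock runs to the end of the file,
	 * which is represented internally by end == -1.
	 */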
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = ap->a_id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
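	/*
	 * lf_next links this lock into the vnode's lock list; while we
	 * are blocked it instead points at the lock we are waiting on.
	 * lf_blkhd is the queue of locks currently blocked on this one.
	 */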
	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

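/*
 * Typical caller: a file system's VOP_ADVLOCK routine passes its
 * per-inode lock list head and the current file size to lf_advlock().
 * A rough sketch (details differ between file systems; i_lockf and
 * i_ffs_size here are ufs's field names):
 *
 *	int
 *	ufs_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_ffs_size));
 *	}
 */
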
/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
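	/*
	 * PLOCK is the priority we sleep at while waiting for the lock;
	 * exclusive requests sleep at a slightly lower priority, and
	 * PCATCH makes the sleep interruptible by signals.
	 */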
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct proc *wproc;
			struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			    (wproc->p_wmesg == lockstr) &&
			    (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
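	/*
	 * needtolink is cleared once the new lock has been linked into
	 * the list, so that the overlap cases below do not insert it twice.
	 */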
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = overlap->lf_blkhd.tqh_first)) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

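	/*
	 * Scan only locks owned by someone else (OTHERS); an overlapping
	 * lock blocks us if either it or our request is exclusive.
	 */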
	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS,
	    &prev, &overlap)) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock. There
 * may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
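	/*
	 * "type" restricts the scan to locks owned by the requester
	 * (SELF) or to locks owned by anyone else (OTHERS); locks with
	 * the wrong ownership are skipped below.
	 */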
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

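	/*
	 * Unlink each waiter from our block list, clear its lf_next so
	 * lf_setlock() knows it has already been removed, and wake it
	 * up to retry the lock.
	 */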
	while ((wakelock = listhead->lf_blkhd.tqh_first)) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%p", lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block %p\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%p", lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		    blk = blk->lf_block.tqe_next) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%p", blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
