/*	$NetBSD: vfs_lockf.c,v 1.14 1998/08/04 04:03:19 perry Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
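
/*
 * SELF and OTHERS select, in lf_findoverlap() below, whether the scan
 * looks for overlapping locks owned by the requesting lf_id or for
 * overlapping locks owned by anyone else.
 */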
64
65 /*
66 * Do an advisory lock operation.
67 */
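/*
 * An illustrative sketch of a caller (not part of this file): a
 * filesystem's VOP_ADVLOCK routine typically supplies the per-inode
 * list head and the current file size, along the lines of
 *
 *	return (lf_advlock(&ip->i_lockf, ip->i_size, ap->a_id,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 */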
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
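	/*
	 * Note the convention used throughout this file: lf_end == -1
	 * means the lock extends to the end of the file, i.e. the range
	 * is unbounded on the right.
	 */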
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
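	/*
	 * (PCATCH makes the tsleep() below interruptible by signals,
	 * which is how the error return after it can occur.)
	 */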
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
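		/*
		 * (How the walk works: a blocked lf_setlock() sleeps on
		 * the address of its own lockf structure, so the blocking
		 * process's p_wchan is that structure; its lf_next points
		 * at the lock it is waiting for, and that lock's lf_id
		 * names the next process in the chain.  Finding ourselves
		 * again means a cycle, i.e. deadlock.)
		 */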
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);
		if (error) {
			/*
			 * We may have been awakened by a signal (in
			 * which case we must remove ourselves from the
			 * blocked list) and/or by another process
			 * releasing a lock (in which case we have already
			 * been removed from the blocked list and our
			 * lf_next field set to NOLOCKF).
			 */
			if (lock->lf_next)
				TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock,
					lf_block);
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = overlap->lf_blkhd.tqh_first)) {
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
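			/*
			 * (lf_split() links `unlock' in between the two
			 * remaining halves; the assignment below takes it
			 * back out again, leaving just the halves.)
			 */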
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock, and if so
 * return a description of it in the caller's flock structure.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while ((ovcase = lf_findoverlap(lf, lock, OTHERS,
	    &prev, &overlap)) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 * Two shared locks never conflict, so only a write
		 * lock on either side can block.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
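		/*
		 * Pictorially (an illustrative sketch; `lock' is the
		 * requested range, `lf' the existing lock being tested):
		 *
		 *	0: lock |---|           3: lock |---------|
		 *	   lf         |---|        lf      |---|
		 *
		 *	1: lock |---|           4: lock     |-----|
		 *	   lf   |---|              lf   |-----|
		 *
		 *	2: lock   |---|         5: lock |-----|
		 *	   lf   |-------|          lf       |-----|
		 */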
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
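/*
 * (For example, with illustrative numbers only: splitting lock1 [0,99]
 * around lock2 [40,59] shrinks lock1 to [0,39], links lock2 after it,
 * and creates a new lock [60,99] after lock2.)
 */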
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
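/*
 * (Each waiter in lf_setlock() sleeps on the address of its own lockf
 * structure, so the wakeup() below rouses exactly the lock just taken
 * off the block list; clearing lf_next tells it the lock was released
 * rather than the sleep interrupted.)
 */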
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *wakelock;

	while ((wakelock = listhead->lf_blkhd.tqh_first)) {
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%p", lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_blkhd.tqh_first)
		printf(" block %p\n", lock->lf_blkhd.tqh_first);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%p", lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		for (blk = lf->lf_blkhd.tqh_first; blk;
		     blk = blk->lf_block.tqe_next) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)(blk->lf_id))->p_pid);
			else
				printf("id 0x%p", blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (blk->lf_blkhd.tqh_first)
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */