/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ufs_lockf.c	7.7 (Berkeley) 7/2/91
 *	$Id: vfs_lockf.c,v 1.3 1994/05/19 05:04:07 cgd Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * Advisory record locking support
 */
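/*
 * Summary of the lf_advlock() arguments, as they are used below:
 *	head	pointer to the head of this file's lock list
 *	size	current file size, used to resolve SEEK_END offsets
 *	id	opaque lock owner; treated as a struct proc * when
 *		F_POSIX is set in flags
 *	op	F_SETLK, F_UNLCK or F_GETLK
 *	fl	the caller's flock description of the byte range
 *	flags	F_WAIT, F_POSIX and/or F_FLOCK
 */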
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
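	/*
	 * Example: l_start == 100 and l_len == 50 lock bytes 100..149
	 * (end == 149); l_len == 0 locks from l_start to end of file,
	 * which is represented here by end == -1.
	 */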
	/*
	 * Create the lockf structure
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch(op) {
	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		free(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;
#endif /* LOCKF_DEBUG */

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2
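/*
 * SELF and OTHERS select which locks lf_findoverlap() will consider:
 * SELF matches only locks with the same owner (lf_id) as the request,
 * OTHERS matches only locks held by different owners.
 */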

/*
 * Set a byte-range lock.
 */
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
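	/*
	 * Write-lock requests sleep at a slightly lower priority
	 * (PLOCK + 4); PCATCH lets a signal interrupt the sleep, in
	 * which case tsleep() returns an error that is handled below.
	 */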
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while (block = lf_getblock(lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
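		/*
		 * The walk above follows the chain "the lock blocking us
		 * is owned by a process that is itself asleep on another
		 * lock", using p_wchan (which tsleep() sets to the
		 * waiter's lockf) and lf_next (which points at the
		 * blocking lock while its owner sleeps).  If the chain
		 * leads back to us, e.g. we wait on B's lock while B
		 * waits on one of ours, the request would deadlock and
		 * EDEADLK is returned instead.
		 */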
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
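		/*
		 * While we sleep, lf_next points at the lock that blocks
		 * us; the deadlock scan above relies on this, and it is
		 * cleared again by lf_wakelock() before we are awakened.
		 */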
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				block->lf_block = block->lf_block->lf_block;
				free(lock, M_LOCKF);
				return (error);
			}
			panic("lf_setlock: lost lock");
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		if (ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap))
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;
	off_t start, end;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if (block = lf_getblock(lock)) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
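		/*
		 * Only a write lock conflicts: two shared (read) locks
		 * held by different owners never block each other.
		 */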
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
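		/*
		 * Sketch of the cases tested below, with "lock" the
		 * request and "lf" the existing lock being examined
		 * (case 0 is the disjoint case, and later cases are
		 * only reached when the earlier ones fail):
		 *
		 *	lock:           |-------|
		 *	case 1:         |-------|
		 *	case 2:      |--------------|
		 *	case 3:           |---|
		 *	case 4:    |-------|
		 *	case 5:             |---------|
		 */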
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
void
lf_addblock(lock, blocked)
	struct lockf *lock;
	struct lockf *blocked;
{
	register struct lockf *lf;

	if (blocked == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", blocked);
		lf_print("to blocked list of", lock);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = lock->lf_block) == NOLOCKF) {
		lock->lf_block = blocked;
		return;
	}
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = blocked;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
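/*
 * For example, splitting lock1 covering bytes 0..99 around lock2
 * covering 40..59 leaves lock1 as 0..39, links in lock2 as 40..59,
 * and allocates a third lock for the remaining 60..99.
 */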
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
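/*
 * Each waiter on lf_block was put to sleep on its own lockf address,
 * with lf_next pointing at this lock; both links are cleared here so
 * the awakened process retries its request from scratch.
 */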
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */
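
/*
 * Illustrative sketch (not part of this file) of how a filesystem's
 * advlock vnode operation might hand its per-inode lock list to
 * lf_advlock().  The function name is hypothetical, and VTOI, i_lockf
 * and i_size are assumptions borrowed from contemporaneous ufs code;
 * other filesystems keep their lock list head elsewhere.
 *
 *	int
 *	xfs_advlock(ap)
 *		struct vop_advlock_args *ap;
 *	{
 *		register struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(&ip->i_lockf, ip->i_size, ap->a_id,
 *		    ap->a_op, ap->a_fl, ap->a_flags));
 *	}
 */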