/*	$NetBSD: vfs_lockf.c,v 1.25 2003/05/01 13:06:59 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.25 2003/05/01 13:06:59 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF    0x1
#define OTHERS  0x2

static int lf_clearlock(struct lockf *);
static int lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *);
static void lf_split(struct lockf *, struct lockf *);
static void lf_wakelock(struct lockf *);

#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
 * interlock should be sufficient; (b) requires a change to the API
 * because the vnode isn't visible here.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
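/*
 * Illustrative caller (not part of this file): a ufs-style filesystem
 * would typically implement its VOP_ADVLOCK operation as a thin wrapper,
 * roughly
 *
 *	int
 *	ufs_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 *
 * where i_lockf heads the per-inode lock list and i_size is the current
 * file size; the exact field names vary from filesystem to filesystem.
 */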
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
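	/*
	 * For example (illustration only, not in the original source):
	 * l_start = 100 with l_len = 50 locks the range [100, 149], while
	 * l_len = 0 yields end = -1, which this code uses throughout to
	 * mean "through end of file".
	 */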
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
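	/*
	 * (PCATCH makes the tsleep() below interruptible by signals;
	 * the resulting nonzero error is propagated to our caller.)
	 */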
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
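		/*
		 * Illustrative cycle (not from the original source):
		 * process A holds a lock on [0, 9] and waits for [10, 19],
		 * which process B holds while waiting for [0, 9].  The
		 * walk below follows A -> B -> A and returns EDEADLK.
		 */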
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
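	/*
	 * For example (illustration only, not in the original source): a
	 * process holding a write lock on [0, 99] that now requests a read
	 * lock on [40, 49] ends up with write locks on [0, 39] and [50, 99]
	 * plus a read lock on [40, 49], and any waiters on the downgraded
	 * range are woken to retry.
	 */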
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *    0) no overlap
		 *    1) overlap == lock
		 *    2) overlap contains lock
		 *    3) lock contains overlap
		 *    4) overlap starts before lock
		 *    5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
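/*
 * For example (illustration only): if another process holds a write lock
 * on [0, 99] overlapping the queried range, the caller's flock is filled
 * in with l_type = F_WRLCK, l_whence = SEEK_SET, l_start = 0, l_len = 100
 * and l_pid set to the owning process (for a POSIX-style lock); otherwise
 * l_type comes back F_UNLCK.
 */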
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope; point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *    0) no overlap
		 *    1) overlap == lock
		 *    2) overlap contains lock
		 *    3) lock contains overlap
		 *    4) overlap starts before lock
		 *    5) overlap ends after lock
		 */
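		/*
		 * Sketch of the cases (illustration only; "lock" is the
		 * incoming range, "lf" the existing one, end -1 meaning
		 * end of file):
		 *
		 *    lock:          |=========|
		 *    case 0:  |--|                 (or wholly to the right)
		 *    case 1:        |---------|
		 *    case 2:      |-------------|
		 *    case 3:          |-----|
		 *    case 4:     |------|
		 *    case 5:             |--------|
		 */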
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
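/*
 * For example (illustration only): splitting a lock on [0, 99] around a
 * contained region [40, 49] shrinks lock1 to [0, 39], links lock2 in as
 * [40, 49], and creates a third lock for the remainder [50, 99].
 */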
static void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */