/*	$NetBSD: vfs_lockf.c,v 1.26 2003/05/01 13:14:49 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.26 2003/05/01 13:14:49 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock(struct lockf *);
static int lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *);
static void lf_split(struct lockf *, struct lockf *);
static void lf_wakelock(struct lockf *);

#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using the vnode's interlock should be sufficient.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

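	/*
	 * A zero l_len means the lock runs to end of file; that is
	 * represented internally by an lf_end of -1 (see lf_getlock()).
	 */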
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
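	/*
	 * (PCATCH makes the tsleep() below return early if a signal
	 * is delivered while we are blocked.)
	 */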
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
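				/*
				 * A blocked lf_setlock() sleeps on its own
				 * struct lockf, so the LWP's wait channel is
				 * the lock it is waiting for.
				 */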
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
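	/*
	 * Clearing lf_lwp marks the lock as granted: the deadlock scan
	 * above stops when it reaches a lock whose lf_lwp has been
	 * cleared.
	 */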
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
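	/*
	 * An end of -1 means the range extends to end of file, so the
	 * comparisons below must special-case -1 rather than compare it
	 * numerically.
	 */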
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */