/*	$NetBSD: vfs_lockf.c,v 1.24 2003/05/01 12:49:17 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.24 2003/05/01 12:49:17 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

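/*
 * NOLOCKF is the null lock pointer; SELF and OTHERS tell
 * lf_findoverlap() whether to scan for locks owned by the requesting
 * lock's owner or for locks owned by everyone else.
 */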
#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock __P((struct lockf *));
static int lf_findoverlap __P((struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *lf_getblock __P((struct lockf *));
static int lf_getlock __P((struct lockf *, struct flock *));
static int lf_setlock __P((struct lockf *));
static void lf_split __P((struct lockf *, struct lockf *));
static void lf_wakelock __P((struct lockf *));

#ifdef LOCKF_DEBUG
static void lf_print __P((char *, struct lockf *));
static void lf_printlist __P((char *, struct lockf *));
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
 * interlock should be sufficient; (b) requires a change to the API
 * because the vnode isn't visible here.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given
 * how infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args *ap;
	struct lockf **head;
	off_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
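
	/*
	 * Example (illustrative): with size == 1000, l_whence == SEEK_END
	 * and l_start == -10, the lock starts at byte 990; an l_start of
	 * -2000 would compute a negative start and fail with EINVAL above.
	 */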

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
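	/*
	 * Illustrative: l_len == 0 means "lock to end of file" and is
	 * represented by end == -1; otherwise the range is inclusive,
	 * e.g. start == 100 with l_len == 50 covers bytes [100,149].
	 */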
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part.  This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
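
/*
 * Illustrative caller sketch (an assumption, modeled on a typical
 * per-filesystem VOP_ADVLOCK implementation; not part of this file):
 *
 *	int
 *	ufs_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 *
 * The filesystem supplies the per-inode lock list head and the current
 * file size so that SEEK_END requests can be resolved above.
 */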

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
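	/*
	 * PCATCH lets the tsleep() below be interrupted by a signal,
	 * in which case the lock request fails with the tsleep() error
	 * rather than waiting forever.
	 */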
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked.  Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us.  MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
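		/*
		 * Example of the cycle being detected (illustrative):
		 * process A holds [0,9] and sleeps waiting for B's lock
		 * on [10,19]; if B now requests [0,9], following the
		 * wait channels leads from A back to B itself, so the
		 * request fails with EDEADLK instead of sleeping.
		 */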
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop when we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 * may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
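		/*
		 * Concrete instances (illustrative), with the requested
		 * range lock = [10,19]:
		 *
		 *	case 0:	lf = [30,39]	(disjoint)
		 *	case 1:	lf = [10,19]	(identical range)
		 *	case 2:	lf = [5,25]	(lf contains lock)
		 *	case 3:	lf = [12,17]	(lock contains lf)
		 *	case 4:	lf = [5,15]	(lf starts before lock)
		 *	case 5:	lf = [15,25]	(lf ends after lock)
		 */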
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		     (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
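/*
 * For example (illustrative): splitting lock1 = [0,99] around the
 * contained lock2 = [40,59] leaves lock1 = [0,39], lock2 = [40,59]
 * and a newly allocated splitlock = [60,99], linked in that order.
 */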
static void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */