/*	$NetBSD: vfs_lockf.c,v 1.27 2003/05/01 14:36:43 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.27 2003/05/01 14:36:43 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

static int lf_clearlock(struct lockf *, struct lockf **);
static int lf_findoverlap(struct lockf *,
	    struct lockf *, int, struct lockf ***, struct lockf **);
static struct lockf *lf_getblock(struct lockf *);
static int lf_getlock(struct lockf *, struct flock *);
static int lf_setlock(struct lockf *, struct lockf **, struct simplelock *);
static void lf_split(struct lockf *, struct lockf *, struct lockf **);
static void lf_wakelock(struct lockf *);

#ifdef LOCKF_DEBUG
static void lf_print(char *, struct lockf *);
static void lf_printlist(char *, struct lockf *);
#endif

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct vnode *vp = ap->a_vp;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Allocate locks before acquiring the simple lock.  We need two
	 * locks in the worst case: a request that falls strictly inside
	 * an existing lock splits that lock in two, which consumes one
	 * extra lockf structure in addition to the one for the request
	 * itself.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for F_UNLCK case, we can re-use lock.
		 */
		if ((fl->l_type & F_FLOCK) == 0) {
			/*
			 * byte-range lock might need one more lock.
			 */
			MALLOC(sparelock, struct lockf *, sizeof(*lock),
			    M_LOCKF, M_WAITOK);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return (EINVAL);
	}

	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(&vp->v_interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
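	/*
	 * For example, a request with l_whence = SEEK_SET, l_start = 100
	 * and l_len = 50 covers the byte range [100, 149]: start = 100
	 * and end = 149.  A request with l_len = 0 runs to end-of-file,
	 * and end stays at -1, which the overlap checks below treat as
	 * "no upper bound".
	 */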
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, &vp->v_interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		/* NOTREACHED */
		break;
	}

quit_unlock:
	simple_unlock(&vp->v_interlock);
quit:
	if (lock)
		FREE(lock, M_LOCKF);
	if (sparelock)
		FREE(sparelock, M_LOCKF);

	return (error);
}
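
/*
 * For reference, requests like the one handled above normally originate
 * from fcntl(2) in userland; file systems that support advisory locking
 * typically implement their VOP_ADVLOCK operation by calling lf_advlock().
 * A minimal, illustrative sketch of a caller that exercises the F_SETLK
 * path (the helper name and descriptor are placeholders for the example):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *
 *	int
 *	lock_first_page(int fd)
 *	{
 *		struct flock fl;
 *
 *		memset(&fl, 0, sizeof(fl));
 *		fl.l_whence = SEEK_SET;
 *		fl.l_start = 0;
 *		fl.l_len = 4096;
 *		fl.l_type = F_WRLCK;
 *		return fcntl(fd, F_SETLK, &fl);
 *	}
 *
 * The kernel converts this into the byte range [0, 4095] and an exclusive
 * (F_WRLCK) lockf entry owned by the calling process.
 */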

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
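		/*
		 * The sort of cycle this guards against is, for example,
		 * process A holding a lock on [0, 9] while waiting for
		 * [10, 19], and process B holding [10, 19] while asking
		 * for [0, 9]: neither request can ever be granted unless
		 * one of the two is refused.
		 */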
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will
			 * be 0 once the lock is granted, so we terminate
			 * the loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!! Add the lock. Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
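		/*
		 * For example, if the new lock covers [10, 19], then an
		 * existing lock on [10, 19] is case 1, [5, 25] is case 2,
		 * [12, 15] is case 3, [5, 12] is case 4, [15, 25] is
		 * case 5, and [30, 40] does not overlap at all (case 0).
		 */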
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
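
/*
 * For example (illustrative only), a process that issues
 *
 *	struct flock fl;
 *
 *	memset(&fl, 0, sizeof(fl));
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;
 *	fl.l_type = F_WRLCK;
 *	fcntl(fd, F_GETLK, &fl);
 *
 * asks "could I write-lock the whole file?".  If another owner holds a
 * conflicting lock, the code above rewrites fl with that lock's type,
 * range, and owning pid (or -1 for an flock(2)-style lock); otherwise
 * only l_type is changed, to F_UNLCK.
 */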

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		    (end == -1 ||
		    (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
		    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
		    (end != -1) &&
		    ((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
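
/*
 * For example, carving [40, 59] out of a lock that covers [0, 99] takes
 * the three-piece path above: lock1 is trimmed to [0, 39], the spare lock
 * becomes [60, 99], and the list is relinked as lock1 -> lock2 -> spare.
 * In the unlock case the caller then unlinks lock2 (the unlock request)
 * again; in the set-lock case lock2 is the newly granted lock and stays
 * on the list.
 */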

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file 0x%p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" :
	    "unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file 0x%p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file 0x%p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */