      1 /*	$NetBSD: vfs_lockf.c,v 1.21 2003/01/18 10:06:37 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Scooter Morris at Genentech Inc.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the University of
     21  *	California, Berkeley and its contributors.
     22  * 4. Neither the name of the University nor the names of its contributors
     23  *    may be used to endorse or promote products derived from this software
     24  *    without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     36  * SUCH DAMAGE.
     37  *
     38  *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
     39  */
     40 
     41 #include <sys/cdefs.h>
     42 __KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.21 2003/01/18 10:06:37 thorpej Exp $");
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/kernel.h>
     47 #include <sys/file.h>
     48 #include <sys/proc.h>
     49 #include <sys/vnode.h>
     50 #include <sys/malloc.h>
     51 #include <sys/fcntl.h>
     52 #include <sys/lockf.h>
     53 
     54 /*
     55  * This variable controls the maximum number of processes that will
     56  * be checked in doing deadlock detection.
     57  */
     58 int maxlockdepth = MAXDEPTH;
     59 
     60 #ifdef LOCKF_DEBUG
     61 int	lockf_debug = 0;
     62 #endif
     63 
     64 #define NOLOCKF (struct lockf *)0
     65 #define SELF	0x1
     66 #define OTHERS	0x2
     67 
     68 /*
     69  * XXX TODO
     70  * Misc cleanups: "caddr_t id" should be visible in the API as a
     71  * "struct proc *".
     72  * (This requires rototilling all VFS's which support advisory locking).
     73  *
     74  * Use pools for lock allocation.
     75  */
     76 
     77 /*
     78  * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
     79  * interlock should be sufficient; (b) requires a change to the API
     80  * because the vnode isn't visible here.
     81  *
     82  * If there's a lot of lock contention on a single vnode, locking
     83  * schemes which allow for more parallelism would be needed.  Given how
     84  * infrequently byte-range locks are actually used in typical BSD
     85  * code, a more complex approach probably isn't worth it.
     86  */
     87 
     88 /*
     89  * Do an advisory lock operation.
     90  */
     91 int
     92 lf_advlock(ap, head, size)
     93 	struct vop_advlock_args *ap;
     94 	struct lockf **head;
     95 	off_t size;
     96 {
     97 	struct flock *fl = ap->a_fl;
     98 	struct lockf *lock;
     99 	off_t start, end;
    100 	int error;
    101 
    102 	/*
    103 	 * Convert the flock structure into a start and end.
    104 	 */
    105 	switch (fl->l_whence) {
    106 	case SEEK_SET:
    107 	case SEEK_CUR:
    108 		/*
    109 		 * Caller is responsible for adding any necessary offset
    110 		 * when SEEK_CUR is used.
    111 		 */
    112 		start = fl->l_start;
    113 		break;
    114 
    115 	case SEEK_END:
    116 		start = size + fl->l_start;
    117 		break;
    118 
    119 	default:
    120 		return (EINVAL);
    121 	}
    122 	if (start < 0)
    123 		return (EINVAL);
    124 
    125 	/*
    126 	 * Avoid the common case of unlocking when the inode has no locks.
    127 	 */
    128 	if (*head == (struct lockf *)0) {
    129 		if (ap->a_op != F_SETLK) {
    130 			fl->l_type = F_UNLCK;
    131 			return (0);
    132 		}
    133 	}
    134 
    135 	if (fl->l_len == 0)
    136 		end = -1;
    137 	else
    138 		end = start + fl->l_len - 1;
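        	/*
        	 * Worked example (illustrative, not part of the original
        	 * source): with l_whence = SEEK_SET, l_start = 100 and
        	 * l_len = 50 the request covers bytes [100, 149]; with
        	 * l_len = 0 the lock runs to end of file, encoded just
        	 * above as end = -1.
        	 */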
    139 	/*
    140 	 * Create the lockf structure.
    141 	 */
    142 	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
    143 	lock->lf_start = start;
    144 	lock->lf_end = end;
    145 	/* XXX NJWLWP
    146 	 * I don't want to make the entire VFS universe use LWPs, because
    147 	 * they don't need them, for the most part. This is an exception,
    148 	 * and a kluge.
    149 	 */
    150 
    151 	lock->lf_head = head;
    152 	lock->lf_type = fl->l_type;
    153 	lock->lf_next = (struct lockf *)0;
    154 	TAILQ_INIT(&lock->lf_blkhd);
    155 	lock->lf_flags = ap->a_flags;
    156 	if (lock->lf_flags & F_POSIX) {
    157 		KASSERT(curproc == (struct proc *)ap->a_id);
    158 		lock->lf_id = (caddr_t) curlwp;
    159 	} else {
    160 		lock->lf_id = ap->a_id; /* Not a proc at all, but a file * */
    161 	}
    162 
    163 
    164 	/*
    165 	 * Do the requested operation.
    166 	 */
    167 	switch (ap->a_op) {
    168 
    169 	case F_SETLK:
    170 		return (lf_setlock(lock));
    171 
    172 	case F_UNLCK:
    173 		error = lf_clearlock(lock);
    174 		FREE(lock, M_LOCKF);
    175 		return (error);
    176 
    177 	case F_GETLK:
    178 		error = lf_getlock(lock, fl);
    179 		FREE(lock, M_LOCKF);
    180 		return (error);
    181 
    182 	default:
    183 		FREE(lock, M_LOCKF);
    184 		return (EINVAL);
    185 	}
    186 	/* NOTREACHED */
    187 }
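
        /*
         * Example (illustrative sketch, not part of this file): a
         * filesystem's VOP_ADVLOCK implementation normally just forwards
         * to lf_advlock(), passing its per-inode lock list head and the
         * current file size.  The ufs version looks roughly like this:
         *
         *	int
         *	ufs_advlock(void *v)
         *	{
         *		struct vop_advlock_args *ap = v;
         *		struct inode *ip = VTOI(ap->a_vp);
         *
         *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
         *	}
         */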
    188 
    189 /*
    190  * Set a byte-range lock.
    191  */
    192 int
    193 lf_setlock(lock)
    194 	struct lockf *lock;
    195 {
    196 	struct lockf *block;
    197 	struct lockf **head = lock->lf_head;
    198 	struct lockf **prev, *overlap, *ltmp;
    199 	static char lockstr[] = "lockf";
    200 	int ovcase, priority, needtolink, error;
    201 
    202 #ifdef LOCKF_DEBUG
    203 	if (lockf_debug & 1)
    204 		lf_print("lf_setlock", lock);
    205 #endif /* LOCKF_DEBUG */
    206 
    207 	/*
    208 	 * Set the priority
    209 	 */
    210 	priority = PLOCK;
    211 	if (lock->lf_type == F_WRLCK)
    212 		priority += 4;
    213 	priority |= PCATCH;
    214 	/*
    215 	 * Scan lock list for this file looking for locks that would block us.
    216 	 */
    217 	while ((block = lf_getblock(lock)) != NULL) {
    218 		/*
    219 		 * Free the structure and return if nonblocking.
    220 		 */
    221 		if ((lock->lf_flags & F_WAIT) == 0) {
    222 			FREE(lock, M_LOCKF);
    223 			return (EAGAIN);
    224 		}
    225 		/*
    226 		 * We are blocked. Since flock style locks cover
    227 		 * the whole file, there is no chance for deadlock.
    228 		 * For byte-range locks we must check for deadlock.
    229 		 *
    230 		 * Deadlock detection is done by looking through the
    231 		 * wait channels to see if there are any cycles that
    232 		 * involve us. MAXDEPTH is set just to make sure we
    233 		 * do not go off into neverneverland.
    234 		 */
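        		/*
        		 * Worked example (illustrative): if process A holds a
        		 * lock on [0,10] and sleeps waiting for B's lock on
        		 * [20,30], while B is itself asleep waiting for A's
        		 * [0,10] lock, the walk below follows B's wait channel
        		 * back to A and returns EDEADLK instead of sleeping
        		 * forever.
        		 */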
    235 		if ((lock->lf_flags & F_POSIX) &&
    236 		    (block->lf_flags & F_POSIX)) {
    237 			struct lwp *wlwp;
    238 			struct lockf *waitblock;
    239 			int i = 0;
    240 
    241 			/* The block is waiting on something */
    242 			wlwp = (struct lwp *)block->lf_id;
    243 			while (wlwp->l_wchan &&
    244 			       (wlwp->l_wmesg == lockstr) &&
    245 			       (i++ < maxlockdepth)) {
    246 				waitblock = (struct lockf *)wlwp->l_wchan;
    247 				/* Get the owner of the blocking lock */
    248 				waitblock = waitblock->lf_next;
    249 				if ((waitblock->lf_flags & F_POSIX) == 0)
    250 					break;
    251 				wlwp = (struct lwp *)waitblock->lf_id;
    252 				if (wlwp == (struct lwp *)lock->lf_id) {
    253 					free(lock, M_LOCKF);
    254 					return (EDEADLK);
    255 				}
    256 			}
    257 			/*
    258 			 * If we're still following a dependency chain
    259 			 * after maxlockdepth iterations, assume we're in
    260 			 * a cycle to be safe.
    261 			 */
    262 			if (i >= maxlockdepth) {
    263 				free(lock, M_LOCKF);
    264 				return (EDEADLK);
    265 			}
    266 		}
    267 		/*
    268 		 * For flock type locks, we must first remove
    269 		 * any shared locks that we hold before we sleep
    270 		 * waiting for an exclusive lock.
    271 		 */
    272 		if ((lock->lf_flags & F_FLOCK) &&
    273 		    lock->lf_type == F_WRLCK) {
    274 			lock->lf_type = F_UNLCK;
    275 			(void) lf_clearlock(lock);
    276 			lock->lf_type = F_WRLCK;
    277 		}
    278 		/*
    279 		 * Add our lock to the blocked list and sleep until we're free.
    280 		 * Remember who blocked us (for deadlock detection).
    281 		 */
    282 		lock->lf_next = block;
    283 		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
    284 #ifdef LOCKF_DEBUG
    285 		if (lockf_debug & 1) {
    286 			lf_print("lf_setlock: blocking on", block);
    287 			lf_printlist("lf_setlock", block);
    288 		}
    289 #endif /* LOCKF_DEBUG */
    290 		error = tsleep((caddr_t)lock, priority, lockstr, 0);
    291 
    292 		/*
    293 		 * We may have been awakened by a signal (in
    294 		 * which case we must remove ourselves from the
    295 		 * blocked list) and/or by another process
    296 		 * releasing a lock (in which case we have already
    297 		 * been removed from the blocked list and our
    298 		 * lf_next field set to NOLOCKF).
    299 		 */
    300 		if (lock->lf_next != NOLOCKF) {
    301 			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
    302 			lock->lf_next = NOLOCKF;
    303 		}
    304 		if (error) {
    305 			free(lock, M_LOCKF);
    306 			return (error);
    307 		}
    308 	}
    309 	/*
    310 	 * No blocks!!  Add the lock.  Note that we will
    311 	 * downgrade or upgrade any overlapping locks this
    312 	 * process already owns.
    313 	 *
    314 	 * Skip over locks owned by other processes.
    315 	 * Handle any locks that overlap and are owned by ourselves.
    316 	 */
    317 	prev = head;
    318 	block = *head;
    319 	needtolink = 1;
    320 	for (;;) {
    321 		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
    322 		if (ovcase)
    323 			block = overlap->lf_next;
    324 		/*
    325 		 * Six cases:
    326 		 *	0) no overlap
    327 		 *	1) overlap == lock
    328 		 *	2) overlap contains lock
    329 		 *	3) lock contains overlap
    330 		 *	4) overlap starts before lock
    331 		 *	5) overlap ends after lock
    332 		 */
    333 		switch (ovcase) {
    334 		case 0: /* no overlap */
    335 			if (needtolink) {
    336 				*prev = lock;
    337 				lock->lf_next = overlap;
    338 			}
    339 			break;
    340 
    341 		case 1: /* overlap == lock */
    342 			/*
    343 			 * If downgrading lock, others may be
    344 			 * able to acquire it.
    345 			 */
    346 			if (lock->lf_type == F_RDLCK &&
    347 			    overlap->lf_type == F_WRLCK)
    348 				lf_wakelock(overlap);
    349 			overlap->lf_type = lock->lf_type;
    350 			FREE(lock, M_LOCKF);
    351 			lock = overlap; /* for debug output below */
    352 			break;
    353 
    354 		case 2: /* overlap contains lock */
    355 			/*
    356 			 * Check for common starting point and different types.
    357 			 */
    358 			if (overlap->lf_type == lock->lf_type) {
    359 				free(lock, M_LOCKF);
    360 				lock = overlap; /* for debug output below */
    361 				break;
    362 			}
    363 			if (overlap->lf_start == lock->lf_start) {
    364 				*prev = lock;
    365 				lock->lf_next = overlap;
    366 				overlap->lf_start = lock->lf_end + 1;
    367 			} else
    368 				lf_split(overlap, lock);
    369 			lf_wakelock(overlap);
    370 			break;
    371 
    372 		case 3: /* lock contains overlap */
    373 			/*
    374 			 * If downgrading lock, others may be able to
    375 			 * acquire it, otherwise take the list.
    376 			 */
    377 			if (lock->lf_type == F_RDLCK &&
    378 			    overlap->lf_type == F_WRLCK) {
    379 				lf_wakelock(overlap);
    380 			} else {
    381 				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
    382 					KASSERT(ltmp->lf_next == overlap);
    383 					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
    384 					    lf_block);
    385 					ltmp->lf_next = lock;
    386 					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
    387 					    ltmp, lf_block);
    388 				}
    389 			}
    390 			/*
    391 			 * Add the new lock if necessary and delete the overlap.
    392 			 */
    393 			if (needtolink) {
    394 				*prev = lock;
    395 				lock->lf_next = overlap->lf_next;
    396 				prev = &lock->lf_next;
    397 				needtolink = 0;
    398 			} else
    399 				*prev = overlap->lf_next;
    400 			free(overlap, M_LOCKF);
    401 			continue;
    402 
    403 		case 4: /* overlap starts before lock */
    404 			/*
    405 			 * Add lock after overlap on the list.
    406 			 */
    407 			lock->lf_next = overlap->lf_next;
    408 			overlap->lf_next = lock;
    409 			overlap->lf_end = lock->lf_start - 1;
    410 			prev = &lock->lf_next;
    411 			lf_wakelock(overlap);
    412 			needtolink = 0;
    413 			continue;
    414 
    415 		case 5: /* overlap ends after lock */
    416 			/*
    417 			 * Add the new lock before overlap.
    418 			 */
    419 			if (needtolink) {
    420 				*prev = lock;
    421 				lock->lf_next = overlap;
    422 			}
    423 			overlap->lf_start = lock->lf_end + 1;
    424 			lf_wakelock(overlap);
    425 			break;
    426 		}
    427 		break;
    428 	}
    429 #ifdef LOCKF_DEBUG
    430 	if (lockf_debug & 1) {
    431 		lf_print("lf_setlock: got the lock", lock);
    432 		lf_printlist("lf_setlock", lock);
    433 	}
    434 #endif /* LOCKF_DEBUG */
    435 	return (0);
    436 }
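
        /*
         * Worked example (illustrative): if this process already holds an
         * exclusive lock on [0,49] and now requests a shared lock on
         * [0,99], lf_findoverlap() reports case 3 (lock contains overlap);
         * since this is a downgrade the old lock's waiters are woken, the
         * old lock is freed, and the new [0,99] shared lock takes its
         * place in the list.
         */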
    437 
    438 /*
    439  * Remove a byte-range lock on an inode.
    440  *
    441  * Generally, find the lock (or an overlap to that lock)
    442  * and remove it (or shrink it), then wakeup anyone we can.
    443  */
    444 int
    445 lf_clearlock(unlock)
    446 	struct lockf *unlock;
    447 {
    448 	struct lockf **head = unlock->lf_head;
    449 	struct lockf *lf = *head;
    450 	struct lockf *overlap, **prev;
    451 	int ovcase;
    452 
    453 	if (lf == NOLOCKF)
    454 		return (0);
    455 #ifdef LOCKF_DEBUG
    456 	if (unlock->lf_type != F_UNLCK)
    457 		panic("lf_clearlock: bad type");
    458 	if (lockf_debug & 1)
    459 		lf_print("lf_clearlock", unlock);
    460 #endif /* LOCKF_DEBUG */
    461 	prev = head;
    462 	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
    463 					&prev, &overlap)) != 0) {
    464 		/*
    465 		 * Wakeup the list of locks to be retried.
    466 		 */
    467 		lf_wakelock(overlap);
    468 
    469 		switch (ovcase) {
    470 
    471 		case 1: /* overlap == lock */
    472 			*prev = overlap->lf_next;
    473 			FREE(overlap, M_LOCKF);
    474 			break;
    475 
    476 		case 2: /* overlap contains lock: split it */
    477 			if (overlap->lf_start == unlock->lf_start) {
    478 				overlap->lf_start = unlock->lf_end + 1;
    479 				break;
    480 			}
    481 			lf_split(overlap, unlock);
    482 			overlap->lf_next = unlock->lf_next;
    483 			break;
    484 
    485 		case 3: /* lock contains overlap */
    486 			*prev = overlap->lf_next;
    487 			lf = overlap->lf_next;
    488 			free(overlap, M_LOCKF);
    489 			continue;
    490 
    491 		case 4: /* overlap starts before lock */
    492 			overlap->lf_end = unlock->lf_start - 1;
    493 			prev = &overlap->lf_next;
    494 			lf = overlap->lf_next;
    495 			continue;
    496 
    497 		case 5: /* overlap ends after lock */
    498 			overlap->lf_start = unlock->lf_end + 1;
    499 			break;
    500 		}
    501 		break;
    502 	}
    503 #ifdef LOCKF_DEBUG
    504 	if (lockf_debug & 1)
    505 		lf_printlist("lf_clearlock", unlock);
    506 #endif /* LOCKF_DEBUG */
    507 	return (0);
    508 }
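
        /*
         * Worked example (illustrative): if this process holds a lock on
         * [0,99] and unlocks [40,59], lf_findoverlap() reports case 2
         * (overlap contains lock); lf_split() turns the single lock into
         * [0,39] and [60,99], and any waiters on the original lock are
         * woken so they can retry.
         */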
    509 
    510 /*
    511  * Check whether there is a blocking lock,
    512  * and if so return its process identifier.
    513  */
    514 int
    515 lf_getlock(lock, fl)
    516 	struct lockf *lock;
    517 	struct flock *fl;
    518 {
    519 	struct lockf *block;
    520 
    521 #ifdef LOCKF_DEBUG
    522 	if (lockf_debug & 1)
    523 		lf_print("lf_getlock", lock);
    524 #endif /* LOCKF_DEBUG */
    525 
    526 	if ((block = lf_getblock(lock)) != NULL) {
    527 		fl->l_type = block->lf_type;
    528 		fl->l_whence = SEEK_SET;
    529 		fl->l_start = block->lf_start;
    530 		if (block->lf_end == -1)
    531 			fl->l_len = 0;
    532 		else
    533 			fl->l_len = block->lf_end - block->lf_start + 1;
    534 		if (block->lf_flags & F_POSIX)
    535 			fl->l_pid = ((struct lwp *)(block->lf_id))->l_proc->p_pid;
    536 		else
    537 			fl->l_pid = -1;
    538 	} else {
    539 		fl->l_type = F_UNLCK;
    540 	}
    541 	return (0);
    542 }
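
        /*
         * Example (illustrative, userland): an F_GETLK request reaches
         * lf_getlock() via fcntl(2) and VOP_ADVLOCK.  A process probing
         * an open descriptor fd for a conflicting writer over the whole
         * file (l_len = 0) might do roughly:
         *
         *	struct flock fl;
         *
         *	fl.l_type = F_WRLCK;
         *	fl.l_whence = SEEK_SET;
         *	fl.l_start = 0;
         *	fl.l_len = 0;
         *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
         *		printf("region held by pid %ld\n", (long)fl.l_pid);
         */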
    543 
    544 /*
    545  * Walk the list of locks for an inode and
    546  * return the first blocking lock.
    547  */
    548 struct lockf *
    549 lf_getblock(lock)
    550 	struct lockf *lock;
    551 {
    552 	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
    553 
    554 	prev = lock->lf_head;
    555 	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
    556 		/*
    557 		 * We've found an overlap; see if it blocks us
    558 		 */
    559 		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
    560 			return (overlap);
    561 		/*
    562 		 * Nope, point to the next one on the list and
    563 		 * see if it blocks us
    564 		 */
    565 		lf = overlap->lf_next;
    566 	}
    567 	return (NOLOCKF);
    568 }
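
        /*
         * Note (illustrative): two shared (F_RDLCK) locks never conflict;
         * lf_getblock() only reports an overlapping lock as blocking when
         * either the request or the existing lock is exclusive (F_WRLCK).
         */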
    569 
    570 /*
    571  * Walk the list of locks for an inode to
    572  * find an overlapping lock (if any).
    573  *
    574  * NOTE: this returns only the FIRST overlapping lock.  There
    575  *	 may be more than one.
    576  */
    577 int
    578 lf_findoverlap(lf, lock, type, prev, overlap)
    579 	struct lockf *lf;
    580 	struct lockf *lock;
    581 	int type;
    582 	struct lockf ***prev;
    583 	struct lockf **overlap;
    584 {
    585 	off_t start, end;
    586 
    587 	*overlap = lf;
    588 	if (lf == NOLOCKF)
    589 		return (0);
    590 #ifdef LOCKF_DEBUG
    591 	if (lockf_debug & 2)
    592 		lf_print("lf_findoverlap: looking for overlap in", lock);
    593 #endif /* LOCKF_DEBUG */
    594 	start = lock->lf_start;
    595 	end = lock->lf_end;
    596 	while (lf != NOLOCKF) {
    597 		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
    598 		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
    599 			*prev = &lf->lf_next;
    600 			*overlap = lf = lf->lf_next;
    601 			continue;
    602 		}
    603 #ifdef LOCKF_DEBUG
    604 		if (lockf_debug & 2)
    605 			lf_print("\tchecking", lf);
    606 #endif /* LOCKF_DEBUG */
    607 		/*
    608 		 * OK, check for overlap
    609 		 *
    610 		 * Six cases:
    611 		 *	0) no overlap
    612 		 *	1) overlap == lock
    613 		 *	2) overlap contains lock
    614 		 *	3) lock contains overlap
    615 		 *	4) overlap starts before lock
    616 		 *	5) overlap ends after lock
    617 		 */
    618 		if ((lf->lf_end != -1 && start > lf->lf_end) ||
    619 		    (end != -1 && lf->lf_start > end)) {
    620 			/* Case 0 */
    621 #ifdef LOCKF_DEBUG
    622 			if (lockf_debug & 2)
    623 				printf("no overlap\n");
    624 #endif /* LOCKF_DEBUG */
    625 			if ((type & SELF) && end != -1 && lf->lf_start > end)
    626 				return (0);
    627 			*prev = &lf->lf_next;
    628 			*overlap = lf = lf->lf_next;
    629 			continue;
    630 		}
    631 		if ((lf->lf_start == start) && (lf->lf_end == end)) {
    632 			/* Case 1 */
    633 #ifdef LOCKF_DEBUG
    634 			if (lockf_debug & 2)
    635 				printf("overlap == lock\n");
    636 #endif /* LOCKF_DEBUG */
    637 			return (1);
    638 		}
    639 		if ((lf->lf_start <= start) &&
    640 		    (end != -1) &&
    641 		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
    642 			/* Case 2 */
    643 #ifdef LOCKF_DEBUG
    644 			if (lockf_debug & 2)
    645 				printf("overlap contains lock\n");
    646 #endif /* LOCKF_DEBUG */
    647 			return (2);
    648 		}
    649 		if (start <= lf->lf_start &&
    650 		           (end == -1 ||
    651 			   (lf->lf_end != -1 && end >= lf->lf_end))) {
    652 			/* Case 3 */
    653 #ifdef LOCKF_DEBUG
    654 			if (lockf_debug & 2)
    655 				printf("lock contains overlap\n");
    656 #endif /* LOCKF_DEBUG */
    657 			return (3);
    658 		}
    659 		if ((lf->lf_start < start) &&
    660 			((lf->lf_end >= start) || (lf->lf_end == -1))) {
    661 			/* Case 4 */
    662 #ifdef LOCKF_DEBUG
    663 			if (lockf_debug & 2)
    664 				printf("overlap starts before lock\n");
    665 #endif /* LOCKF_DEBUG */
    666 			return (4);
    667 		}
    668 		if ((lf->lf_start > start) &&
    669 			(end != -1) &&
    670 			((lf->lf_end > end) || (lf->lf_end == -1))) {
    671 			/* Case 5 */
    672 #ifdef LOCKF_DEBUG
    673 			if (lockf_debug & 2)
    674 				printf("overlap ends after lock\n");
    675 #endif /* LOCKF_DEBUG */
    676 			return (5);
    677 		}
    678 		panic("lf_findoverlap: default");
    679 	}
    680 	return (0);
    681 }
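
        /*
         * Worked example (illustrative): against an existing lock lf
         * covering [50,100], a request covering...
         *
         *	[10,20]   returns 0 (no overlap)
         *	[50,100]  returns 1 (overlap == lock)
         *	[60,70]   returns 2 (overlap contains lock)
         *	[40,120]  returns 3 (lock contains overlap)
         *	[80,120]  returns 4 (overlap starts before lock)
         *	[40,60]   returns 5 (overlap ends after lock)
         */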
    682 
    683 /*
    684  * Split a lock and a contained region into
    685  * two or three locks as necessary.
    686  */
    687 void
    688 lf_split(lock1, lock2)
    689 	struct lockf *lock1;
    690 	struct lockf *lock2;
    691 {
    692 	struct lockf *splitlock;
    693 
    694 #ifdef LOCKF_DEBUG
    695 	if (lockf_debug & 2) {
    696 		lf_print("lf_split", lock1);
    697 		lf_print("splitting from", lock2);
    698 	}
    699 #endif /* LOCKF_DEBUG */
    700 	/*
    701 	 * Check to see if splitting into only two pieces.
    702 	 */
    703 	if (lock1->lf_start == lock2->lf_start) {
    704 		lock1->lf_start = lock2->lf_end + 1;
    705 		lock2->lf_next = lock1;
    706 		return;
    707 	}
    708 	if (lock1->lf_end == lock2->lf_end) {
    709 		lock1->lf_end = lock2->lf_start - 1;
    710 		lock2->lf_next = lock1->lf_next;
    711 		lock1->lf_next = lock2;
    712 		return;
    713 	}
    714 	/*
    715 	 * Make a new lock consisting of the last part of
    716 	 * the encompassing lock
    717 	 */
    718 	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
    719 	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
    720 	splitlock->lf_start = lock2->lf_end + 1;
    721 	TAILQ_INIT(&splitlock->lf_blkhd);
    722 	lock1->lf_end = lock2->lf_start - 1;
    723 	/*
    724 	 * OK, now link it in
    725 	 */
    726 	splitlock->lf_next = lock1->lf_next;
    727 	lock2->lf_next = splitlock;
    728 	lock1->lf_next = lock2;
    729 }
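
        /*
         * Worked example (illustrative): splitting lock1 covering [0,99]
         * around a contained region lock2 covering [40,59] shrinks lock1
         * to [0,39], allocates a new splitlock for [60,99], and links the
         * three as lock1 -> lock2 -> splitlock on the lock list.
         */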
    730 
    731 /*
    732  * Wake up the list of locks blocked on the given lock.
    733  */
    734 void
    735 lf_wakelock(listhead)
    736 	struct lockf *listhead;
    737 {
    738 	struct lockf *wakelock;
    739 
    740 	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
    741 		KASSERT(wakelock->lf_next == listhead);
    742 		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
    743 		wakelock->lf_next = NOLOCKF;
    744 #ifdef LOCKF_DEBUG
    745 		if (lockf_debug & 2)
    746 			lf_print("lf_wakelock: awakening", wakelock);
    747 #endif
    748 		wakeup((caddr_t)wakelock);
    749 	}
    750 }
    751 
    752 #ifdef LOCKF_DEBUG
    753 /*
    754  * Print out a lock.
    755  */
    756 void
    757 lf_print(tag, lock)
    758 	char *tag;
    759 	struct lockf *lock;
    760 {
    761 
    762 	printf("%s: lock %p for ", tag, lock);
    763 	if (lock->lf_flags & F_POSIX)
    764 		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
    765 	else
    766 		printf("id 0x%p", lock->lf_id);
    767 	printf(" %s, start %qx, end %qx",
    768 		lock->lf_type == F_RDLCK ? "shared" :
    769 		lock->lf_type == F_WRLCK ? "exclusive" :
    770 		lock->lf_type == F_UNLCK ? "unlock" :
    771 		"unknown", lock->lf_start, lock->lf_end);
    772 	if (TAILQ_FIRST(&lock->lf_blkhd))
    773 		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
    774 	else
    775 		printf("\n");
    776 }
    777 
    778 void
    779 lf_printlist(tag, lock)
    780 	char *tag;
    781 	struct lockf *lock;
    782 {
    783 	struct lockf *lf, *blk;
    784 
    785 	printf("%s: Lock list:\n", tag);
    786 	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
    787 		printf("\tlock %p for ", lf);
    788 		if (lf->lf_flags & F_POSIX)
    789 			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
    790 		else
    791 			printf("id 0x%p", lf->lf_id);
    792 		printf(", %s, start %qx, end %qx",
    793 			lf->lf_type == F_RDLCK ? "shared" :
    794 			lf->lf_type == F_WRLCK ? "exclusive" :
    795 			lf->lf_type == F_UNLCK ? "unlock" :
    796 			"unknown", lf->lf_start, lf->lf_end);
    797 		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
    798 			if (blk->lf_flags & F_POSIX)
    799 				printf("proc %d",
    800 				    ((struct lwp *)(blk->lf_id))->l_proc->p_pid);
    801 			else
    802 				printf("id 0x%p", blk->lf_id);
    803 			printf(", %s, start %qx, end %qx",
    804 				blk->lf_type == F_RDLCK ? "shared" :
    805 				blk->lf_type == F_WRLCK ? "exclusive" :
    806 				blk->lf_type == F_UNLCK ? "unlock" :
    807 				"unknown", blk->lf_start, blk->lf_end);
    808 			if (TAILQ_FIRST(&blk->lf_blkhd))
    809 				 panic("lf_printlist: bad list");
    810 		}
    811 		printf("\n");
    812 	}
    813 }
    814 #endif /* LOCKF_DEBUG */
    815