/*	$NetBSD: vfs_lockf.c,v 1.23 2003/03/05 18:28:22 mycroft Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.23 2003/03/05 18:28:22 mycroft Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 *
 * Use pools for lock allocation.
 */

/*
 * XXXSMP TODO: Using either (a) a global lock, or (b) the vnode's
 * interlock should be sufficient; (b) requires a change to the API
 * because the vnode isn't visible here.
 *
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(ap, head, size)
	struct vop_advlock_args *ap;
	struct lockf **head;
	off_t size;
{
	struct flock *fl = ap->a_fl;
	struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
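	/*
	 * Example (illustrative): with a 1000-byte file, SEEK_END and
	 * l_start = -10, l_len = 0 yield start = 990 and end = -1,
	 * i.e. from byte 990 with no upper bound; SEEK_SET with
	 * l_start = 0 and l_len = 100 yields start = 0, end = 99.
	 */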
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof(*lock), M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;
	lock->lf_lwp = curlwp;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
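
/*
 * Usage sketch (illustrative, not part of this file): a file system's
 * VOP_ADVLOCK implementation typically just forwards to lf_advlock(),
 * passing its per-inode lock list head and the current file size.
 * Roughly, for a hypothetical "examplefs":
 *
 *	int
 *	examplefs_advlock(v)
 *		void *v;
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct examplefs_inode *ip = VTOEX(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 *
 * The names examplefs_advlock, VTOEX and the inode fields are made up
 * for illustration; each VFS supplies its own equivalents.
 */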

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	struct lockf *lock;
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			struct lockf *waitblock;
			int i = 0;

			/*
			 * The block is waiting on something.  lf_lwp will be
			 * 0 once the lock is granted, so we terminate the
			 * loop if we find this.
			 */
			wlwp = block->lf_lwp;
			while (wlwp && (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wlwp->l_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wlwp = waitblock->lf_lwp;
				if (wlwp == lock->lf_lwp) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				free(lock, M_LOCKF);
				return (EDEADLK);
			}
		}
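		/*
		 * Illustrative walk-through of the loop above: a blocked
		 * lwp sleeps in tsleep() with its pending lockf as the
		 * wait channel, so wlwp->l_wchan recovers that pending
		 * lock, and its lf_next points at the granted lock it is
		 * queued behind.  Repeating this hop by hop traces the
		 * wait-for chain; if it ever comes back to our own
		 * lf_lwp, granting the lock would close a cycle, so we
		 * fail with EDEADLK instead of sleeping.
		 */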
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = tsleep((caddr_t)lock, priority, lockstr, 0);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
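	/*
	 * Example (illustrative): if we already hold a shared lock on
	 * bytes 0-99 and now request an exclusive lock on the same
	 * range, case 1 below upgrades the existing entry in place;
	 * the reverse downgrade instead wakes any sleepers the old
	 * exclusive lock was blocking.
	 */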
	lock->lf_lwp = 0;
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
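
/*
 * Example (illustrative): clearing bytes 40-59 out of a held lock on
 * 0-99 is case 2 above; lf_split() shrinks the original to 0-39 and
 * creates a new lock covering 60-99, leaving a hole where the unlock
 * was requested.
 */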

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	struct lockf *lock;
	struct flock *fl;
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
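
/*
 * From userland this path is reached via fcntl(2).  A minimal sketch
 * of probing for a conflicting lock:
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("blocked by pid %d\n", (int)fl.l_pid);
 *
 * If nothing blocks the request, l_type comes back as F_UNLCK;
 * otherwise the structure describes the first blocking lock found
 * above, with l_pid of -1 for flock(2)-style locks.
 */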

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
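		/*
		 * Pictorially (lock is the request, lf the entry checked):
		 *
		 *    case 1:  lock  |------|      case 2:  lock    |--|
		 *             lf    |------|               lf    |------|
		 *
		 *    case 3:  lock  |------|      case 4:  lock     |----|
		 *             lf      |--|                 lf    |----|
		 *
		 *    case 5:  lock  |----|
		 *             lf       |----|
		 */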
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	struct lockf *lock1;
	struct lockf *lock2;
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof(*splitlock), M_LOCKF, M_WAITOK);
	memcpy((caddr_t)splitlock, (caddr_t)lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
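
/*
 * Example (illustrative): splitting a held lock on bytes 0-99 around a
 * contained request for 40-59 leaves lock1 covering 0-39, links lock2
 * (40-59) after it, and allocates splitlock for the remainder 60-99.
 */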

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup((caddr_t)wakelock);
	}
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	struct lockf *lock;
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				 panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */