/*	$NetBSD: vfs_lockf.c,v 1.67 2008/08/07 07:42:06 skrll Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.67 2008/08/07 07:42:06 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/atomic.h>
#include <sys/kauth.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the vnode structure.  Locks are sorted by the starting byte of the lock for
 * efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */
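
/*
 * Illustrative sketch (not authoritative) of the two uses of lf_next:
 * granted locks are chained from *head in start order, while each
 * pending request is queued on the lf_blkhd list of the lock that
 * blocks it, with its own lf_next pointing back at that blocker:
 *
 *	*head -> [granted A] -> [granted B] -> NULL
 *	             |
 *	         lf_blkhd: [waiter X] -> [waiter Y]
 *	         (X->lf_next == A, Y->lf_next == A)
 */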

TAILQ_HEAD(locklist, lockf);

struct lockf {
	kcondvar_t lf_cv;	 /* Signalling */
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	void	*lf_id;		 /* process or file description holding lock */
	struct	lockf **lf_head; /* Back pointer to the head of lockf list */
	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
	struct  locklist lf_blkhd; /* List of requests blocked on this lock */
	TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
	uid_t	lf_uid;		 /* User ID responsible */
};

/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50

static pool_cache_t lockf_cache;
static kmutex_t *lockf_lock;
static char lockstr[] = "lockf";

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "void *id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're cut off entirely.
 */
int maxlocksperuid = 1024;

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(const char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(const char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("; proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("; file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				 panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */

/*
 * 3 options for allowfail.
 * 0 - always allocate.  1 - cutoff at limit.  2 - cutoff at double limit.
 */
static struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;
	u_long lcnt;

	uip = uid_find(uid);
	lcnt = atomic_inc_ulong_nv(&uip->ui_lockcnt);
	if (uid && allowfail && lcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		atomic_dec_ulong(&uip->ui_lockcnt);
		return NULL;
	}

	lock = pool_cache_get(lockf_cache, PR_WAITOK);
	lock->lf_uid = uid;
	return lock;
}

static void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;

	uip = uid_find(lock->lf_uid);
	atomic_dec_ulong(&uip->ui_lockcnt);
	pool_cache_put(lockf_cache, lock);
}

static int
lf_ctor(void *arg, void *obj, int flag)
{
	struct lockf *lock;

	lock = obj;
	cv_init(&lock->lf_cv, lockstr);

	return 0;
}

static void
lf_dtor(void *arg, void *obj)
{
	struct lockf *lock;

	lock = obj;
	cv_destroy(&lock->lf_cv);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NULL)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NULL) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
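		/*
		 * Illustrative sketch of the cases, "lock" being the
		 * request and "lf" the existing lock under test
		 * (ranges not to scale):
		 *
		 *	0:  lock |---|            (disjoint)
		 *	    lf          |---|
		 *	1:  lock |-------|
		 *	    lf   |-------|
		 *	2:  lock    |---|
		 *	    lf   |-------|
		 *	3:  lock |-------|
		 *	    lf      |--|
		 *	4:  lock     |------|
		 *	    lf   |-----|
		 *	5:  lock |-----|
		 *	    lf       |------|
		 */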
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check whether we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	cv_init(&splitlock->lf_cv, lockstr);

	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
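
/*
 * Worked example (illustrative only, byte ranges hypothetical):
 * carving lock2 = [40, 59] out of lock1 = [0, 99] trims lock1 to
 * [0, 39] and turns the caller's spare lock into the tail piece:
 *
 *	before:  lock1 |0 ........................ 99|
 *	after:   lock1 |0..39| lock2 |40..59| splitlock |60..99|
 *
 * with the list relinked as lock1 -> lock2 -> splitlock.
 */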

/*
 * Wakeup a blocklist
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NULL;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		cv_broadcast(&wakelock->lf_cv);
	}
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NULL)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
	    &prev, &overlap)) != 0) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return NULL;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    kmutex_t *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	int ovcase, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			volatile const struct lockf *waitblock;
			int i = 0;
			struct proc *p;

			p = (struct proc *)block->lf_id;
			KASSERT(p != NULL);
			while (i++ < maxlockdepth) {
				mutex_enter(p->p_lock);
				if (p->p_nlwps > 1) {
					mutex_exit(p->p_lock);
					break;
				}
				wlwp = LIST_FIRST(&p->p_lwps);
				lwp_lock(wlwp);
				if (wlwp->l_wchan == NULL ||
				    wlwp->l_wmesg != lockstr) {
					lwp_unlock(wlwp);
					mutex_exit(p->p_lock);
					break;
				}
				waitblock = wlwp->l_wchan;
				lwp_unlock(wlwp);
				mutex_exit(p->p_lock);
				if (waitblock == NULL) {
					/*
					 * This LWP just woke up but has
					 * not yet returned from sleeping
					 * on the lock's condvar.
					 */
					break;
				}
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				p = (struct proc *)waitblock->lf_id;
				if (p == curproc) {
					lf_free(lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = cv_wait_sig(&lock->lf_cv, interlock);

		/*
		 * We may have been awoken by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NULL).
		 */
		if (lock->lf_next != NULL) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NULL;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}
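
/*
 * Illustrative userland view (a sketch, not part of this file): an
 * F_GETLK query such as
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *	    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_GETLK, &fl);
 *
 * ends up here; if a conflicting lock exists, fl is overwritten with
 * its type, range (l_len == 0 meaning "through EOF"), and owning pid
 * (-1 for flock-style locks).
 */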

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct lwp *l = curlwp;
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	kmutex_t *interlock = lockf_lock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;

	/*
	 * Allocate locks before acquiring the interlock.  We need two
	 * locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX For F_UNLCK case, we can re-use the lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * Byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(kauth_cred_geteuid(l->l_cred), 0);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = lf_alloc(kauth_cred_geteuid(l->l_cred),
	    ap->a_op != F_UNLCK ? 1 : 2);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	mutex_enter(interlock);

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock consumed or freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	mutex_exit(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}
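
/*
 * Example caller (a sketch; see e.g. ufs_advlock() for the real
 * thing): a file system's VOP_ADVLOCK implementation typically just
 * passes its per-inode lock list head and current file size along:
 *
 *	int
 *	xxx_advlock(void *v)
 *	{
 *		struct vop_advlock_args *ap = v;
 *		struct inode *ip = VTOI(ap->a_vp);
 *
 *		return lf_advlock(ap, &ip->i_lockf, ip->i_size);
 *	}
 */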

/*
 * Initialize subsystem.  XXX We use a global lock.  This could be the
 * vnode interlock, but the deadlock detection code may need to inspect
 * locks belonging to other files.
 */
void
lf_init(void)
{

	lockf_cache = pool_cache_init(sizeof(struct lockf), 0, 0, 0, "lockf",
	    NULL, IPL_NONE, lf_ctor, lf_dtor, NULL);
	lockf_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
}
    963