/*	$NetBSD: vfs_lockf.c,v 1.53 2006/05/20 12:19:30 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.4 (Berkeley) 10/26/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.53 2006/05/20 12:19:30 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/kauth.h>

/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the inode structure.  Locks are sorted by the starting byte of the lock
 * for efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */

TAILQ_HEAD(locklist, lockf);

struct lockf {
	short	lf_flags;	 /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
	short	lf_type;	 /* Lock type: F_RDLCK, F_WRLCK */
	off_t	lf_start;	 /* The byte # of the start of the lock */
	off_t	lf_end;		 /* The byte # of the end of the lock (-1=EOF)*/
	void	*lf_id;		 /* process or file description holding lock */
	struct	lockf **lf_head; /* Back pointer to the head of lockf list */
	struct	lockf *lf_next;	 /* Next lock on this vnode, or blocking lock */
	struct  locklist lf_blkhd; /* List of requests blocked on this lock */
	TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
	uid_t	lf_uid;		 /* User ID responsible */
};
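
/*
 * Illustrative sketch (comment only, not compiled): a vnode with two
 * held locks and one sleeping waiter, using the fields above.
 *
 *	*lf_head --> [lock A] --lf_next--> [lock B] --lf_next--> NULL
 *	                |
 *	             lf_blkhd
 *	                |
 *	                v
 *	             [lock C]	pending: C->lf_next points back at A,
 *				and C sits on A's lf_blkhd via lf_block.
 *
 * Held locks are chained through lf_next in order of lf_start; a
 * pending lock's lf_next instead names the lock it is blocked on.
 */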

/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50

static POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl",
    &pool_allocator_nointr);

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * XXX TODO
 * Misc cleanups: "caddr_t id" should be visible in the API as a
 * "struct proc *".
 * (This requires rototilling all VFS's which support advisory locking).
 */

/*
 * If there's a lot of lock contention on a single vnode, locking
 * schemes which allow for more parallelism would be needed.  Given how
 * infrequently byte-range locks are actually used in typical BSD
 * code, a more complex approach probably isn't worth it.
 */

/*
 * We enforce a limit on locks by uid, so that a single user cannot
 * run the kernel out of memory.  For now, the limit is pretty coarse.
 * There is no limit on root.
 *
 * Splitting a lock will always succeed, regardless of current allocations.
 * If you're slightly above the limit, we still have to permit an allocation
 * so that the unlock can succeed.  If the unlocking causes too many splits,
 * however, you're totally cut off.
 */
int maxlocksperuid = 1024;

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
static void
lf_print(char *tag, struct lockf *lock)
{

	printf("%s: lock %p for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)lock->lf_id)->p_pid);
	else
		printf("file %p", (struct file *)lock->lf_id);
	printf(" %s, start %qx, end %qx",
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (TAILQ_FIRST(&lock->lf_blkhd))
		printf(" block %p\n", TAILQ_FIRST(&lock->lf_blkhd));
	else
		printf("\n");
}

static void
lf_printlist(char *tag, struct lockf *lock)
{
	struct lockf *lf, *blk;

	printf("%s: Lock list:\n", tag);
	for (lf = *lock->lf_head; lf; lf = lf->lf_next) {
		printf("\tlock %p for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)lf->lf_id)->p_pid);
		else
			printf("file %p", (struct file *)lf->lf_id);
		printf(", %s, start %qx, end %qx",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
			if (blk->lf_flags & F_POSIX)
				printf("proc %d",
				    ((struct proc *)blk->lf_id)->p_pid);
			else
				printf("file %p", (struct file *)blk->lf_id);
			printf(", %s, start %qx, end %qx",
				blk->lf_type == F_RDLCK ? "shared" :
				blk->lf_type == F_WRLCK ? "exclusive" :
				blk->lf_type == F_UNLCK ? "unlock" :
				"unknown", blk->lf_start, blk->lf_end);
			if (TAILQ_FIRST(&blk->lf_blkhd))
				 panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */

/*
 * Three options for allowfail:
 * 0 - always allocate.  1 - cut off at limit.  2 - cut off at double limit.
 */
static struct lockf *
lf_alloc(uid_t uid, int allowfail)
{
	struct uidinfo *uip;
	struct lockf *lock;
	int s;

	uip = uid_find(uid);
	UILOCK(uip, s);
	if (uid && allowfail && uip->ui_lockcnt >
	    (allowfail == 1 ? maxlocksperuid : (maxlocksperuid * 2))) {
		UIUNLOCK(uip, s);
		return NULL;
	}
	uip->ui_lockcnt++;
	UIUNLOCK(uip, s);
	lock = pool_get(&lockfpool, PR_WAITOK);
	lock->lf_uid = uid;
	return lock;
}

static void
lf_free(struct lockf *lock)
{
	struct uidinfo *uip;
	int s;

	uip = uid_find(lock->lf_uid);
	UILOCK(uip, s);
	uip->ui_lockcnt--;
	UIUNLOCK(uip, s);
	pool_put(&lockfpool, lock);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
static int
lf_findoverlap(struct lockf *lf, struct lockf *lock, int type,
    struct lockf ***prev, struct lockf **overlap)
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type == SELF) && lf->lf_id != lock->lf_id) ||
		    ((type == OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return 0;
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return 1;
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return 2;
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return 3;
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return 4;
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return 5;
		}
		panic("lf_findoverlap: default");
	}
	return 0;
}
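
/*
 * Illustrative sketch of the six lf_findoverlap() return values,
 * with "lock" the request and "lf" the list entry being checked
 * ('=' marks each byte range):
 *
 *	0) no overlap			lock	====
 *					lf		====
 *	1) overlap == lock		lock	====
 *					lf	====
 *	2) overlap contains lock	lock	 ==
 *					lf	======
 *	3) lock contains overlap	lock	======
 *					lf	 ==
 *	4) overlap starts before lock	lock	  ====
 *					lf	====
 *	5) overlap ends after lock	lock	====
 *					lf	  ====
 */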

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
static void
lf_split(struct lockf *lock1, struct lockf *lock2, struct lockf **sparelock)
{
	struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = *sparelock;
	*sparelock = NULL;
	memcpy(splitlock, lock1, sizeof(*splitlock));
	splitlock->lf_start = lock2->lf_end + 1;
	TAILQ_INIT(&splitlock->lf_blkhd);
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in.
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
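
/*
 * Illustrative example (hypothetical ranges): splitting a held lock
 * [0, 99] around a contained region [40, 59] takes the three-piece
 * path above and leaves
 *
 *	before:	lock1 = [0, 99]
 *	after:	lock1 = [0, 39] -> lock2 = [40, 59] -> splitlock = [60, 99]
 *
 * The spare lock is consumed only in this three-piece case, which is
 * why callers that may split (see lf_advlock()) pre-allocate one.
 */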

/*
 * Wake up a blocklist.
 */
static void
lf_wakelock(struct lockf *listhead)
{
	struct lockf *wakelock;

	while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) {
		KASSERT(wakelock->lf_next == listhead);
		TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif
		wakeup(wakelock);
	}
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
static int
lf_clearlock(struct lockf *unlock, struct lockf **sparelock)
{
	struct lockf **head = unlock->lf_head;
	struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return 0;
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while ((ovcase = lf_findoverlap(lf, unlock, SELF,
					&prev, &overlap)) != 0) {
		/*
		 * Wake up the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			lf_free(overlap);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock, sparelock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(struct lockf *lock)
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);

	prev = lock->lf_head;
	while (lf_findoverlap(lf, lock, OTHERS, &prev, &overlap) != 0) {
		/*
		 * We've found an overlap; see if it blocks us.
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return overlap;
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us.
		 */
		lf = overlap->lf_next;
	}
	return NOLOCKF;
}

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *lock, struct lockf **sparelock,
    struct simplelock *interlock)
{
	struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while ((block = lf_getblock(lock)) != NULL) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			lf_free(lock);
			return EAGAIN;
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverneverland.
		 */
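		/*
		 * Illustrative cycle (hypothetical pids): proc 10 holds
		 * [0, 9] and sleeps on proc 20's [10, 19]; if proc 20
		 * (curproc) now requests [0, 9], walking l_wchan ->
		 * lf_next from our blocker's owner leads back to
		 * curproc, and we fail with EDEADLK below.
		 */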
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			struct lwp *wlwp;
			volatile const struct lockf *waitblock;
			int i = 0;
			struct proc *p;

			p = (struct proc *)block->lf_id;
			KASSERT(p != NULL);
			while (i++ < maxlockdepth) {
				simple_lock(&p->p_lock);
				if (p->p_nlwps > 1) {
					simple_unlock(&p->p_lock);
					break;
				}
				wlwp = LIST_FIRST(&p->p_lwps);
				if (wlwp->l_wmesg != lockstr) {
					simple_unlock(&p->p_lock);
					break;
				}
				simple_unlock(&p->p_lock);
				waitblock = wlwp->l_wchan;
				if (waitblock == NULL) {
					/*
					 * This LWP just woke up but has
					 * not yet returned from ltsleep.
					 */
					break;
				}
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				p = (struct proc *)waitblock->lf_id;
				if (p == curproc) {
					lf_free(lock);
					return EDEADLK;
				}
			}
			/*
			 * If we're still following a dependency chain
			 * after maxlockdepth iterations, assume we're in
			 * a cycle to be safe.
			 */
			if (i >= maxlockdepth) {
				lf_free(lock);
				return EDEADLK;
			}
		}
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock, NULL);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		error = ltsleep(lock, priority, lockstr, 0, interlock);

		/*
		 * We may have been awakened by a signal (in
		 * which case we must remove ourselves from the
		 * blocked list) and/or by another process
		 * releasing a lock (in which case we have already
		 * been removed from the blocked list and our
		 * lf_next field set to NOLOCKF).
		 */
		if (lock->lf_next != NOLOCKF) {
			TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
			lock->lf_next = NOLOCKF;
		}
		if (error) {
			lf_free(lock);
			return error;
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
		if (ovcase)
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			lf_free(lock);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				lf_free(lock);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock, sparelock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				while ((ltmp = TAILQ_FIRST(&overlap->lf_blkhd))) {
					KASSERT(ltmp->lf_next == overlap);
					TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
					    lf_block);
					ltmp->lf_next = lock;
					TAILQ_INSERT_TAIL(&lock->lf_blkhd,
					    ltmp, lf_block);
				}
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			lf_free(overlap);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return 0;
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct lockf *lock, struct flock *fl)
{
	struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(lock)) != NULL) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)block->lf_id)->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return 0;
}
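
/*
 * Illustrative only (userland sketch, not part of this file): an
 * fcntl(F_GETLK) call that lands in lf_getlock() above; the path and
 * range are hypothetical.
 *
 *	struct flock fl;
 *	int fd = open("/tmp/afile", O_RDWR);
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("range held by pid %d\n", (int)fl.l_pid);
 */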

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **head, off_t size)
{
	struct proc *p = curproc;
	struct flock *fl = ap->a_fl;
	struct lockf *lock = NULL;
	struct lockf *sparelock;
	struct simplelock *interlock = &ap->a_vp->v_interlock;
	off_t start, end;
	int error = 0;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return EINVAL;
	}
	if (start < 0)
		return EINVAL;

	/*
	 * Allocate locks before acquiring the simple lock.
	 * We need two locks in the worst case.
	 */
	switch (ap->a_op) {
	case F_SETLK:
	case F_UNLCK:
		/*
		 * XXX for the F_UNLCK case, we can re-use the lock.
		 */
		if ((ap->a_flags & F_FLOCK) == 0) {
			/*
			 * A byte-range lock might need one more lock.
			 */
			sparelock = lf_alloc(kauth_cred_geteuid(p->p_cred), 0);
			if (sparelock == NULL) {
				error = ENOMEM;
				goto quit;
			}
			break;
		}
		/* FALLTHROUGH */

	case F_GETLK:
		sparelock = NULL;
		break;

	default:
		return EINVAL;
	}

	lock = lf_alloc(kauth_cred_geteuid(p->p_cred), ap->a_op != F_UNLCK ? 1 : 2);
	if (lock == NULL) {
		error = ENOMEM;
		goto quit;
	}

	simple_lock(interlock);

	/*
	 * Avoid the common case of unlocking when the inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			error = 0;
			goto quit_unlock;
		}
	}

	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
	/*
	 * Create the lockf structure.
	 */
	lock->lf_start = start;
	lock->lf_end = end;
	/* XXX NJWLWP
	 * I don't want to make the entire VFS universe use LWPs, because
	 * they don't need them, for the most part. This is an exception,
	 * and a kluge.
	 */

	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	TAILQ_INIT(&lock->lf_blkhd);
	lock->lf_flags = ap->a_flags;
	if (lock->lf_flags & F_POSIX) {
		KASSERT(curproc == (struct proc *)ap->a_id);
	}
	lock->lf_id = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	switch (ap->a_op) {

	case F_SETLK:
		error = lf_setlock(lock, &sparelock, interlock);
		lock = NULL; /* lf_setlock freed it */
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, &sparelock);
		break;

	case F_GETLK:
		error = lf_getlock(lock, fl);
		break;

	default:
		break;
		/* NOTREACHED */
	}

quit_unlock:
	simple_unlock(interlock);
quit:
	if (lock)
		lf_free(lock);
	if (sparelock)
		lf_free(sparelock);

	return error;
}
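
/*
 * Illustrative only (userland sketch, not part of this file): the two
 * lock styles that funnel into lf_advlock(); the file name is made up.
 *
 *	int fd = open("/tmp/afile", O_RDWR);
 *
 *	POSIX byte-range lock (F_POSIX semantics, owned by the process):
 *		struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *		    .l_start = 0, .l_len = 100 };
 *		fcntl(fd, F_SETLKW, &fl);
 *
 *	Whole-file lock (F_FLOCK semantics, owned by the open file):
 *		flock(fd, LOCK_EX);
 *
 * Both arrive via VOP_ADVLOCK(); per-filesystem implementations pass
 * the per-inode lock list head and the a_op/a_flags seen above.
 */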
    938