/*	$NetBSD: vfs_lockf.c,v 1.5 1994/06/29 06:33:55 cgd Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>

/*
 * Do an advisory lock operation.
 */
int
lf_advlock(head, size, id, op, fl, flags)
	struct lockf **head;
	off_t size;
	caddr_t id;
	int op;
	register struct flock *fl;
	int flags;
{
	register struct lockf *lock;
	off_t start, end;
	int error;

	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if (*head == (struct lockf *)0) {
		if (op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {

	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (start < 0)
		return (EINVAL);
	if (fl->l_len == 0)
		end = -1;
	else
		end = start + fl->l_len - 1;
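	/*
	 * For example, l_start 100 with l_len 10 covers bytes 100..109,
	 * so end becomes 109; l_len of 0 means "to end of file" and is
	 * represented internally as end = -1.
	 */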
	/*
	 * Create the lockf structure.
	 */
	MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_id = id;
	lock->lf_head = head;
	lock->lf_type = fl->l_type;
	lock->lf_next = (struct lockf *)0;
	lock->lf_block = (struct lockf *)0;
	lock->lf_flags = flags;
	/*
	 * Do the requested operation.
	 */
	switch (op) {

	case F_SETLK:
		return (lf_setlock(lock));

	case F_UNLCK:
		error = lf_clearlock(lock);
		FREE(lock, M_LOCKF);
		return (error);

	case F_GETLK:
		error = lf_getlock(lock, fl);
		FREE(lock, M_LOCKF);
		return (error);

	default:
		FREE(lock, M_LOCKF);
		return (EINVAL);
	}
	/* NOTREACHED */
}
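
/*
 * Illustrative sketch (not part of this file): a file system's advisory
 * locking vnode operation is expected to hand lf_advlock() the head of
 * its per-inode lock list together with the current file size.  For a
 * UFS-style caller that might look roughly like:
 *
 *	struct inode *ip = VTOI(ap->a_vp);
 *
 *	return (lf_advlock(&ip->i_lockf, ip->i_size, ap->a_id,
 *	    ap->a_op, ap->a_fl, ap->a_flags));
 *
 * where the "ap->a_*" names follow the vop_advlock_args convention and
 * are shown only to indicate how the parameters map.
 */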

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
int	lockf_debug = 0;
#endif

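/*
 * NOLOCKF is the null lock pointer.  SELF and OTHERS tell
 * lf_findoverlap() whose locks to consider: SELF restricts the scan
 * to locks owned by the same lf_id as the lock being checked, OTHERS
 * restricts it to locks owned by anyone else.
 */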
#define NOLOCKF (struct lockf *)0
#define SELF	0x1
#define OTHERS	0x2

/*
 * Set a byte-range lock.
 */
int
lf_setlock(lock)
	register struct lockf *lock;
{
	register struct lockf *block;
	struct lockf **head = lock->lf_head;
	struct lockf **prev, *overlap, *ltmp;
	static char lockstr[] = "lockf";
	int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	while (block = lf_getblock(lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0) {
			FREE(lock, M_LOCKF);
			return (EAGAIN);
		}
		/*
		 * We are blocked. Since flock style locks cover
		 * the whole file, there is no chance for deadlock.
		 * For byte-range locks we must check for deadlock.
		 *
		 * Deadlock detection is done by looking through the
		 * wait channels to see if there are any cycles that
		 * involve us. MAXDEPTH is set just to make sure we
		 * do not go off into neverland.
		 */
		if ((lock->lf_flags & F_POSIX) &&
		    (block->lf_flags & F_POSIX)) {
			register struct proc *wproc;
			register struct lockf *waitblock;
			int i = 0;

			/* The block is waiting on something */
			wproc = (struct proc *)block->lf_id;
			while (wproc->p_wchan &&
			       (wproc->p_wmesg == lockstr) &&
			       (i++ < maxlockdepth)) {
				waitblock = (struct lockf *)wproc->p_wchan;
				/* Get the owner of the blocking lock */
				waitblock = waitblock->lf_next;
				if ((waitblock->lf_flags & F_POSIX) == 0)
					break;
				wproc = (struct proc *)waitblock->lf_id;
				if (wproc == (struct proc *)lock->lf_id) {
					free(lock, M_LOCKF);
					return (EDEADLK);
				}
			}
		}
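		/*
		 * Concretely: suppose we (process A) are about to sleep
		 * on a lock owned by process B, and B is itself asleep
		 * in tsleep() on "lockf".  B's p_wchan is B's own pending
		 * lockf structure, whose lf_next points at the lock B is
		 * waiting for.  If the owner of that lock turns out to
		 * be A, sleeping would complete a cycle, so EDEADLK is
		 * returned instead.  Longer chains (A -> B -> C -> A)
		 * are caught the same way, up to maxlockdepth hops.
		 */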
		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			(void) lf_clearlock(lock);
			lock->lf_type = F_WRLCK;
		}
		/*
		 * Add our lock to the blocked list and sleep until we're free.
		 * Remember who blocked us (for deadlock detection).
		 */
		lock->lf_next = block;
		lf_addblock(block, lock);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: blocking on", block);
			lf_printlist("lf_setlock", block);
		}
#endif /* LOCKF_DEBUG */
		if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) {
			/*
			 * Delete ourselves from the waiting to lock list.
			 */
			for (block = lock->lf_next;
			     block != NOLOCKF;
			     block = block->lf_block) {
				if (block->lf_block != lock)
					continue;
				block->lf_block = block->lf_block->lf_block;
				break;
			}
			/*
			 * If we did not find ourselves on the list, but
			 * are still linked onto a lock list, then something
			 * is very wrong.
			 */
			if (block == NOLOCKF && lock->lf_next != NOLOCKF)
				panic("lf_setlock: lost lock");
			free(lock, M_LOCKF);
			return (error);
		}
	}
	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 *
	 * Skip over locks owned by other processes.
	 * Handle any locks that overlap and are owned by ourselves.
	 */
	prev = head;
	block = *head;
	needtolink = 1;
	for (;;) {
		if (ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap))
			block = overlap->lf_next;
		/*
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		switch (ovcase) {
		case 0: /* no overlap */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			break;

		case 1: /* overlap == lock */
			/*
			 * If downgrading lock, others may be
			 * able to acquire it.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK)
				lf_wakelock(overlap);
			overlap->lf_type = lock->lf_type;
			FREE(lock, M_LOCKF);
			lock = overlap; /* for debug output below */
			break;

		case 2: /* overlap contains lock */
			/*
			 * Check for common starting point and different types.
			 */
			if (overlap->lf_type == lock->lf_type) {
				free(lock, M_LOCKF);
				lock = overlap; /* for debug output below */
				break;
			}
			if (overlap->lf_start == lock->lf_start) {
				*prev = lock;
				lock->lf_next = overlap;
				overlap->lf_start = lock->lf_end + 1;
			} else
				lf_split(overlap, lock);
			lf_wakelock(overlap);
			break;

		case 3: /* lock contains overlap */
			/*
			 * If downgrading lock, others may be able to
			 * acquire it, otherwise take the list.
			 */
			if (lock->lf_type == F_RDLCK &&
			    overlap->lf_type == F_WRLCK) {
				lf_wakelock(overlap);
			} else {
				ltmp = lock->lf_block;
				lock->lf_block = overlap->lf_block;
				lf_addblock(lock, ltmp);
			}
			/*
			 * Add the new lock if necessary and delete the overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap->lf_next;
				prev = &lock->lf_next;
				needtolink = 0;
			} else
				*prev = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			/*
			 * Add lock after overlap on the list.
			 */
			lock->lf_next = overlap->lf_next;
			overlap->lf_next = lock;
			overlap->lf_end = lock->lf_start - 1;
			prev = &lock->lf_next;
			lf_wakelock(overlap);
			needtolink = 0;
			continue;

		case 5: /* overlap ends after lock */
			/*
			 * Add the new lock before overlap.
			 */
			if (needtolink) {
				*prev = lock;
				lock->lf_next = overlap;
			}
			overlap->lf_start = lock->lf_end + 1;
			lf_wakelock(overlap);
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1) {
		lf_print("lf_setlock: got the lock", lock);
		lf_printlist("lf_setlock", lock);
	}
#endif /* LOCKF_DEBUG */
	return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
int
lf_clearlock(unlock)
	register struct lockf *unlock;
{
	struct lockf **head = unlock->lf_head;
	register struct lockf *lf = *head;
	struct lockf *overlap, **prev;
	int ovcase;

	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	prev = head;
	while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) {
		/*
		 * Wakeup the list of locks to be retried.
		 */
		lf_wakelock(overlap);

		switch (ovcase) {

		case 1: /* overlap == lock */
			*prev = overlap->lf_next;
			FREE(overlap, M_LOCKF);
			break;

		case 2: /* overlap contains lock: split it */
			if (overlap->lf_start == unlock->lf_start) {
				overlap->lf_start = unlock->lf_end + 1;
				break;
			}
			lf_split(overlap, unlock);
			overlap->lf_next = unlock->lf_next;
			break;

		case 3: /* lock contains overlap */
			*prev = overlap->lf_next;
			lf = overlap->lf_next;
			free(overlap, M_LOCKF);
			continue;

		case 4: /* overlap starts before lock */
			overlap->lf_end = unlock->lf_start - 1;
			prev = &overlap->lf_next;
			lf = overlap->lf_next;
			continue;

		case 5: /* overlap ends after lock */
			overlap->lf_start = unlock->lf_end + 1;
			break;
		}
		break;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
	return (0);
}
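
/*
 * Worked example: if this process holds one lock covering bytes 0..99
 * and unlocks 40..59, lf_findoverlap() reports case 2, lf_split()
 * leaves locks covering 0..39 and 60..99, and any waiters queued on
 * the original lock are awakened so they can retry.
 */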

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
int
lf_getlock(lock, fl)
	register struct lockf *lock;
	register struct flock *fl;
{
	register struct lockf *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if (block = lf_getblock(lock)) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == -1)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		if (block->lf_flags & F_POSIX)
			fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
		else
			fl->l_pid = -1;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
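
/*
 * In other words, for F_GETLK the caller's flock structure is either
 * overwritten with a description of the first conflicting lock (type,
 * range relative to the start of the file, and owning pid for
 * POSIX-style locks), or gets l_type set to F_UNLCK if the requested
 * lock could have been granted.
 */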

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
struct lockf *
lf_getblock(lock)
	register struct lockf *lock;
{
	struct lockf **prev, *overlap, *lf = *(lock->lf_head);
	int ovcase;

	prev = lock->lf_head;
	while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) {
		/*
		 * We've found an overlap, see if it blocks us
		 */
		if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK))
			return (overlap);
		/*
		 * Nope, point to the next one on the list and
		 * see if it blocks us
		 */
		lf = overlap->lf_next;
	}
	return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *	 may be more than one.
 */
int
lf_findoverlap(lf, lock, type, prev, overlap)
	register struct lockf *lf;
	struct lockf *lock;
	int type;
	struct lockf ***prev;
	struct lockf **overlap;
{
	off_t start, end;

	*overlap = lf;
	if (lf == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	while (lf != NOLOCKF) {
		if (((type & SELF) && lf->lf_id != lock->lf_id) ||
		    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
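		/*
		 * Pictorially, with "lock" as the range being checked and
		 * "lf" as the existing lock under examination, the typical
		 * shapes are:
		 *
		 *	case 1:	lock  |---------|
		 *		lf    |---------|
		 *
		 *	case 2:	lock     |---|
		 *		lf    |---------|
		 *
		 *	case 3:	lock  |---------|
		 *		lf       |---|
		 *
		 *	case 4:	lock     |------|
		 *		lf    |-----|
		 *
		 *	case 5:	lock  |-----|
		 *		lf       |------|
		 */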
		if ((lf->lf_end != -1 && start > lf->lf_end) ||
		    (end != -1 && lf->lf_start > end)) {
			/* Case 0 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			if ((type & SELF) && end != -1 && lf->lf_start > end)
				return (0);
			*prev = &lf->lf_next;
			*overlap = lf = lf->lf_next;
			continue;
		}
		if ((lf->lf_start == start) && (lf->lf_end == end)) {
			/* Case 1 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			return (1);
		}
		if ((lf->lf_start <= start) &&
		    (end != -1) &&
		    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
			/* Case 2 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			return (2);
		}
		if (start <= lf->lf_start &&
		           (end == -1 ||
			   (lf->lf_end != -1 && end >= lf->lf_end))) {
			/* Case 3 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			return (3);
		}
		if ((lf->lf_start < start) &&
			((lf->lf_end >= start) || (lf->lf_end == -1))) {
			/* Case 4 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			return (4);
		}
		if ((lf->lf_start > start) &&
			(end != -1) &&
			((lf->lf_end > end) || (lf->lf_end == -1))) {
			/* Case 5 */
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			return (5);
		}
		panic("lf_findoverlap: default");
	}
	return (0);
}

/*
 * Add a lock to the end of the blocked list.
 */
void
lf_addblock(lock, blocked)
	struct lockf *lock;
	struct lockf *blocked;
{
	register struct lockf *lf;

	if (blocked == NOLOCKF)
		return;
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("addblock: adding", blocked);
		lf_print("to blocked list of", lock);
	}
#endif /* LOCKF_DEBUG */
	if ((lf = lock->lf_block) == NOLOCKF) {
		lock->lf_block = blocked;
		return;
	}
	while (lf->lf_block != NOLOCKF)
		lf = lf->lf_block;
	lf->lf_block = blocked;
	return;
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
void
lf_split(lock1, lock2)
	register struct lockf *lock1;
	register struct lockf *lock2;
{
	register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we are splitting into only two pieces.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lock1->lf_start = lock2->lf_end + 1;
		lock2->lf_next = lock1;
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lock1->lf_end = lock2->lf_start - 1;
		lock2->lf_next = lock1->lf_next;
		lock1->lf_next = lock2;
		return;
	}
	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock
	 */
	MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
	bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
	splitlock->lf_start = lock2->lf_end + 1;
	splitlock->lf_block = NOLOCKF;
	lock1->lf_end = lock2->lf_start - 1;
	/*
	 * OK, now link it in
	 */
	splitlock->lf_next = lock1->lf_next;
	lock2->lf_next = splitlock;
	lock1->lf_next = lock2;
}
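
/*
 * After a three-way split the list order is lock1 (front piece), lock2
 * (the contained region passed in), then splitlock (the tail piece
 * copied from lock1); a caller that was carving out an unlock range
 * subsequently unlinks lock2 again.
 */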

/*
 * Wakeup a blocklist
 */
void
lf_wakelock(listhead)
	struct lockf *listhead;
{
	register struct lockf *blocklist, *wakelock;

	blocklist = listhead->lf_block;
	listhead->lf_block = NOLOCKF;
	while (blocklist != NOLOCKF) {
		wakelock = blocklist;
		blocklist = blocklist->lf_block;
		wakelock->lf_block = NOLOCKF;
		wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
		wakeup((caddr_t)wakelock);
	}
}
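
/*
 * Note that an awakened waiter is not granted the lock here; it simply
 * returns from tsleep() in lf_setlock() and rescans the lock list, so
 * it may block again on some other conflicting lock.
 */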

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
	char *tag;
	register struct lockf *lock;
{

	printf("%s: lock 0x%lx for ", tag, lock);
	if (lock->lf_flags & F_POSIX)
		printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid);
	else
		printf("id 0x%x", lock->lf_id);
	printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d",
		lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev),
		lock->lf_type == F_RDLCK ? "shared" :
		lock->lf_type == F_WRLCK ? "exclusive" :
		lock->lf_type == F_UNLCK ? "unlock" :
		"unknown", lock->lf_start, lock->lf_end);
	if (lock->lf_block)
		printf(" block 0x%x\n", lock->lf_block);
	else
		printf("\n");
}

void
lf_printlist(tag, lock)
	char *tag;
	struct lockf *lock;
{
	register struct lockf *lf;

	printf("%s: Lock list for ino %d on dev <%d, %d>:\n",
		tag, lock->lf_inode->i_number,
		major(lock->lf_inode->i_dev),
		minor(lock->lf_inode->i_dev));
	for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
		printf("\tlock 0x%lx for ", lf);
		if (lf->lf_flags & F_POSIX)
			printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid);
		else
			printf("id 0x%x", lf->lf_id);
		printf(", %s, start %d, end %d",
			lf->lf_type == F_RDLCK ? "shared" :
			lf->lf_type == F_WRLCK ? "exclusive" :
			lf->lf_type == F_UNLCK ? "unlock" :
			"unknown", lf->lf_start, lf->lf_end);
		if (lf->lf_block)
			printf(" block 0x%x\n", lf->lf_block);
		else
			printf("\n");
	}
}
#endif /* LOCKF_DEBUG */