/*	$NetBSD: kern_lock.c,v 1.16.2.1 1999/05/04 17:05:42 perry Exp $	*/

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#if 0 /*#was defined(MULTIPROCESSOR)*/
/*-

This macro is Bad Style and it doesn't work either... [pk, 10-14-1998]

-*
 * For multiprocessor system, try spin lock first.
 *
 * This should be inline expanded below, but we cannot have #if
 * inside a multiline define.
 */

int lock_wait_time = 100;
#define PAUSE(lkp, wanted)						\
		if (lock_wait_time > 0) {				\
			int i;						\
									\
			simple_unlock(&lkp->lk_interlock);		\
			for (i = lock_wait_time; i > 0; i--)		\
				if (!(wanted))				\
					break;				\
			simple_lock(&lkp->lk_interlock);		\
		}							\
		if (!(wanted))						\
			break;

#else /* ! MULTIPROCESSOR */

/*
 * It is an error to spin on a uniprocessor as nothing will ever cause
 * the simple lock to clear while we are executing.
 */
#define PAUSE(lkp, wanted)

#endif /* MULTIPROCESSOR */

/*
 * Acquire a resource.
 */
#define ACQUIRE(lkp, error, extflags, wanted)				\
	PAUSE(lkp, wanted);						\
	for (error = 0; wanted; ) {					\
		(lkp)->lk_waitcount++;					\
		simple_unlock(&(lkp)->lk_interlock);			\
		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
		simple_lock(&(lkp)->lk_interlock);			\
		(lkp)->lk_waitcount--;					\
		if (error)						\
			break;						\
		if ((extflags) & LK_SLEEPFAIL) {			\
			error = ENOLCK;					\
			break;						\
		}							\
	}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	lkp->lk_prio = prio;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_lockholder = LK_NOPROC;
}
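
/*
 * Illustrative sketch, not part of the original file: typical use of
 * lockinit() followed by a shared-lock round trip through lockmgr().
 * The priority (PINOD), the wait message and the function name are
 * arbitrary example values, not anything defined elsewhere in the tree.
 */
#if 0	/* example only */
void
example_shared_access(void)
{
	struct lock examplelock;

	/* One-time setup: sleep at PINOD, wait message "exmplk", no timeout. */
	lockinit(&examplelock, PINOD, "exmplk", 0, 0);

	/* Take a shared (read) lock, do the read-side work, release it. */
	if (lockmgr(&examplelock, LK_SHARED, NULL) == 0) {
		/* ... read-side work ... */
		lockmgr(&examplelock, LK_RELEASE, NULL);
	}
}
#endif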

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp)
	struct lock *lkp;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}
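
/*
 * Illustrative fragment, not part of the original file: lockstatus() is
 * handy for diagnostic assertions, e.g. verifying that a lock is held
 * exclusively before touching the data it protects.  "examplelock" is the
 * hypothetical lock from the sketch above.
 */
#if 0	/* example only */
	if (lockstatus(&examplelock) != LK_EXCLUSIVE)
		panic("example: lock not held exclusively");
#endif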

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(lkp, flags, interlkp)
	__volatile struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
{
	int error;
	pid_t pid;
	int extflags;
	struct proc *p = curproc;

	error = 0;
	if (p)
		pid = p->p_pid;
	else
		pid = LK_KERNPROC;
	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
#ifdef DIAGNOSTIC
	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    lkp->lk_lockholder != pid)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}
#endif /* DIAGNOSTIC */

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (lkp->lk_lockholder != pid) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 *	Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			     lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(p, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (pid != lkp->lk_lockholder)
				panic("lockmgr: pid %d, not exclusive lock "
				    "holder %d unlocking", pid,
				    lkp->lk_lockholder);
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(p, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(p, -1);
		}
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		PAUSE(lkp, ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
		for (error = 0; ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
			lkp->lk_flags |= LK_WAITDRAIN;
			simple_unlock(&lkp->lk_interlock);
			if ((error = tsleep((void *)&lkp->lk_flags,
			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)))
				return (error);
			if ((extflags) & LK_SLEEPFAIL)
				return (ENOLCK);
			simple_lock(&lkp->lk_interlock);
		}
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}
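
/*
 * Illustrative sketch, not part of the original file: two common lockmgr()
 * calling patterns.  The interlock pattern hands an already-held simplelock
 * to lockmgr() via LK_INTERLOCK, so there is no window between examining the
 * protected state and sleeping for the lock; the drain pattern is the usual
 * last step before an object containing a lock is freed.  "obj", its fields
 * and obj_free() are hypothetical.
 */
#if 0	/* example only */
	/* Interlock pattern: atomically drop obj->o_interlock and lock. */
	simple_lock(&obj->o_interlock);
	if (obj->o_usecount > 0) {
		obj->o_usecount++;
		lockmgr(&obj->o_lock, LK_EXCLUSIVE | LK_INTERLOCK,
		    &obj->o_interlock);
		/* ... exclusive access to obj ... */
		lockmgr(&obj->o_lock, LK_RELEASE, NULL);
	} else
		simple_unlock(&obj->o_interlock);

	/* Drain pattern: wait out all users, then it is safe to free. */
	lockmgr(&obj->o_lock, LK_DRAIN, NULL);
	lockmgr(&obj->o_lock, LK_RELEASE, NULL);
	obj_free(obj);
#endif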

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) && !defined(MULTIPROCESSOR)
#include <sys/kernel.h>
#include <vm/vm.h>
#include <sys/sysctl.h>
int lockpausetime = 0;
struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
int simplelockrecurse;
LIST_HEAD(slocklist, simplelock) slockdebuglist;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{
	alp->lock_data = 0;
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = 0;
}

void
_simple_lock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		printf("simple_lock: lock held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		       alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		       alp->unlock_file, alp->unlock_line);
		if (lockpausetime == -1)
			panic("simple_lock: lock held");
		if (lockpausetime == 1) {
#ifdef BACKTRACE
			BACKTRACE(curproc);
#endif
		} else if (lockpausetime > 1) {
			printf("simple_lock: lock held, pausing...");
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
		return;
	}

	s = splhigh();
	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
	splx(s);

	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	alp->lock_file = id;
	alp->lock_line = l;

	s = splhigh();
	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
	splx(s);

	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	__volatile struct simplelock *alp;
	const char *id;
	int l;
{
	int s;

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		printf("simple_unlock: lock not held\n");
		printf("currently at: %s:%d\n", id, l);
		printf("last locked: %s:%d\n",
		       alp->lock_file, alp->lock_line);
		printf("last unlocked: %s:%d\n",
		       alp->unlock_file, alp->unlock_line);
		if (lockpausetime == -1)
			panic("simple_unlock: lock not held");
		if (lockpausetime == 1) {
#ifdef BACKTRACE
			BACKTRACE(curproc);
#endif
		} else if (lockpausetime > 1) {
			printf("simple_unlock: lock not held, pausing...");
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
		return;
	}

	s = splhigh();
	LIST_REMOVE(alp, list);
	alp->list.le_next = NULL;
	alp->list.le_prev = NULL;
	splx(s);

	alp->lock_data = 0;
	alp->unlock_file = id;
	alp->unlock_line = l;
	if (curproc)
		curproc->p_simple_locks--;
}

void
simple_lock_dump()
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	printf("all simple locks:\n");
	for (alp = LIST_FIRST(&slockdebuglist);
	     alp != NULL;
	     alp = LIST_NEXT(alp, list)) {
		printf("%p  %s:%d\n", alp, alp->lock_file, alp->lock_line);
	}
	splx(s);
}

void
simple_lock_freecheck(start, end)
void *start, *end;
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	for (alp = LIST_FIRST(&slockdebuglist);
	     alp != NULL;
	     alp = LIST_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			printf("freeing simple_lock %p %s:%d\n",
			       alp, alp->lock_file, alp->lock_line);
#ifdef DDB
			Debugger();
#endif
		}
	}
	splx(s);
}
#endif /* LOCKDEBUG && ! MULTIPROCESSOR */
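
/*
 * Illustrative sketch, not part of the original file: when LOCKDEBUG is
 * enabled, <sys/lock.h> is expected to map the public simple lock macros
 * onto the debugging functions above, passing the caller's file and line so
 * that "last locked"/"last unlocked" can be reported.  The exact macro names
 * and spellings below are an assumption, shown only to make the id/l
 * parameters above concrete.
 */
#if 0	/* example only */
#define simple_lock(alp)	_simple_lock((alp), __FILE__, __LINE__)
#define simple_lock_try(alp)	_simple_lock_try((alp), __FILE__, __LINE__)
#define simple_unlock(alp)	_simple_unlock((alp), __FILE__, __LINE__)
#endif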