      1  1.16.2.1.2.2       chs /*	$NetBSD: kern_lock.c,v 1.16.2.1.2.2 1999/07/04 01:35:32 chs Exp $	*/
      2           1.2      fvdl 
      3           1.1      fvdl /*
      4           1.1      fvdl  * Copyright (c) 1995
      5           1.1      fvdl  *	The Regents of the University of California.  All rights reserved.
      6           1.1      fvdl  *
      7           1.1      fvdl  * This code contains ideas from software contributed to Berkeley by
      8           1.1      fvdl  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
      9           1.1      fvdl  * System project at Carnegie-Mellon University.
     10           1.1      fvdl  *
     11           1.1      fvdl  * Redistribution and use in source and binary forms, with or without
     12           1.1      fvdl  * modification, are permitted provided that the following conditions
     13           1.1      fvdl  * are met:
     14           1.1      fvdl  * 1. Redistributions of source code must retain the above copyright
     15           1.1      fvdl  *    notice, this list of conditions and the following disclaimer.
     16           1.1      fvdl  * 2. Redistributions in binary form must reproduce the above copyright
     17           1.1      fvdl  *    notice, this list of conditions and the following disclaimer in the
     18           1.1      fvdl  *    documentation and/or other materials provided with the distribution.
     19           1.1      fvdl  * 3. All advertising materials mentioning features or use of this software
     20           1.1      fvdl  *    must display the following acknowledgement:
     21           1.1      fvdl  *	This product includes software developed by the University of
     22           1.1      fvdl  *	California, Berkeley and its contributors.
     23           1.1      fvdl  * 4. Neither the name of the University nor the names of its contributors
     24           1.1      fvdl  *    may be used to endorse or promote products derived from this software
     25           1.1      fvdl  *    without specific prior written permission.
     26           1.1      fvdl  *
     27           1.1      fvdl  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     28           1.1      fvdl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     29           1.1      fvdl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     30           1.1      fvdl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     31           1.1      fvdl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     32           1.1      fvdl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     33           1.1      fvdl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     34           1.1      fvdl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     35           1.1      fvdl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     36           1.1      fvdl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     37           1.1      fvdl  * SUCH DAMAGE.
     38           1.1      fvdl  *
     39           1.1      fvdl  *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
     40           1.1      fvdl  */
     41           1.7   thorpej 
     42           1.7   thorpej #include "opt_lockdebug.h"
     43  1.16.2.1.2.1       chs #include "opt_ddb.h"
     44           1.1      fvdl 
     45           1.1      fvdl #include <sys/param.h>
     46           1.1      fvdl #include <sys/proc.h>
     47           1.1      fvdl #include <sys/lock.h>
     48           1.2      fvdl #include <sys/systm.h>
     49           1.1      fvdl #include <machine/cpu.h>
     50           1.1      fvdl 
     51           1.1      fvdl /*
     52           1.1      fvdl  * Locking primitives implementation.
      53           1.1      fvdl  * Locks provide shared/exclusive synchronization.
     54           1.1      fvdl  */
     55           1.1      fvdl 
     56      1.16.2.1     perry #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
     57           1.1      fvdl #define COUNT(p, x) if (p) (p)->p_locks += (x)
     58           1.1      fvdl #else
     59           1.1      fvdl #define COUNT(p, x)
     60           1.1      fvdl #endif
     61           1.1      fvdl 
     62          1.11        pk #if 0 /*#was defined(MULTIPROCESSOR)*/
     63          1.11        pk /*-
     64           1.1      fvdl 
     65          1.11        pk This macro is Bad Style and it doesn't work either... [pk, 10-14-1998]
     66          1.11        pk 
     67          1.11        pk -*
      68           1.1      fvdl  * For a multiprocessor system, try the spin lock first.
     69           1.1      fvdl  *
     70           1.1      fvdl  * This should be inline expanded below, but we cannot have #if
     71           1.1      fvdl  * inside a multiline define.
     72           1.1      fvdl  */
     73          1.11        pk 
     74           1.1      fvdl int lock_wait_time = 100;
     75           1.1      fvdl #define PAUSE(lkp, wanted)						\
     76           1.1      fvdl 		if (lock_wait_time > 0) {				\
     77           1.1      fvdl 			int i;						\
     78           1.1      fvdl 									\
     79           1.1      fvdl 			simple_unlock(&lkp->lk_interlock);		\
     80           1.1      fvdl 			for (i = lock_wait_time; i > 0; i--)		\
     81           1.1      fvdl 				if (!(wanted))				\
     82           1.1      fvdl 					break;				\
     83           1.1      fvdl 			simple_lock(&lkp->lk_interlock);		\
     84           1.1      fvdl 		}							\
     85           1.1      fvdl 		if (!(wanted))						\
     86           1.1      fvdl 			break;
     87           1.1      fvdl 
     88           1.9   thorpej #else /* ! MULTIPROCESSOR */
     89           1.1      fvdl 
     90           1.1      fvdl /*
     91           1.1      fvdl  * It is an error to spin on a uniprocessor as nothing will ever cause
     92           1.1      fvdl  * the simple lock to clear while we are executing.
     93           1.1      fvdl  */
     94           1.1      fvdl #define PAUSE(lkp, wanted)
     95           1.1      fvdl 
     96           1.9   thorpej #endif /* MULTIPROCESSOR */
     97           1.1      fvdl 
     98           1.1      fvdl /*
     99           1.1      fvdl  * Acquire a resource.
    100           1.1      fvdl  */
    101           1.1      fvdl #define ACQUIRE(lkp, error, extflags, wanted)				\
    102           1.1      fvdl 	PAUSE(lkp, wanted);						\
    103           1.1      fvdl 	for (error = 0; wanted; ) {					\
    104           1.1      fvdl 		(lkp)->lk_waitcount++;					\
    105           1.1      fvdl 		simple_unlock(&(lkp)->lk_interlock);			\
    106           1.1      fvdl 		error = tsleep((void *)lkp, (lkp)->lk_prio,		\
    107           1.1      fvdl 		    (lkp)->lk_wmesg, (lkp)->lk_timo);			\
    108           1.1      fvdl 		simple_lock(&(lkp)->lk_interlock);			\
    109           1.1      fvdl 		(lkp)->lk_waitcount--;					\
    110           1.1      fvdl 		if (error)						\
    111           1.1      fvdl 			break;						\
    112           1.1      fvdl 		if ((extflags) & LK_SLEEPFAIL) {			\
    113           1.1      fvdl 			error = ENOLCK;					\
    114           1.1      fvdl 			break;						\
    115           1.1      fvdl 		}							\
    116           1.1      fvdl 	}
    117           1.1      fvdl 
    118           1.1      fvdl /*
    119           1.1      fvdl  * Initialize a lock; required before use.
    120           1.1      fvdl  */
    121           1.1      fvdl void
    122           1.1      fvdl lockinit(lkp, prio, wmesg, timo, flags)
    123           1.1      fvdl 	struct lock *lkp;
    124           1.1      fvdl 	int prio;
    125           1.4   mycroft 	const char *wmesg;
    126           1.1      fvdl 	int timo;
    127           1.1      fvdl 	int flags;
    128           1.1      fvdl {
    129           1.1      fvdl 
    130           1.8     perry 	memset(lkp, 0, sizeof(struct lock));
    131           1.1      fvdl 	simple_lock_init(&lkp->lk_interlock);
    132           1.1      fvdl 	lkp->lk_flags = flags & LK_EXTFLG_MASK;
    133           1.1      fvdl 	lkp->lk_prio = prio;
    134           1.1      fvdl 	lkp->lk_timo = timo;
    135           1.1      fvdl 	lkp->lk_wmesg = wmesg;
    136           1.1      fvdl 	lkp->lk_lockholder = LK_NOPROC;
    137           1.1      fvdl }
    138           1.1      fvdl 
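/*
 * Illustrative sketch, not part of the original source: a typical
 * lockinit() call.  The lock variable, wait message and priority are
 * arbitrary example values; the declarations used come from
 * <sys/param.h> and <sys/lock.h>, both already included above.
 */
#if 0	/* example only */
struct lock example_lock;

void
example_lock_setup()
{

	/* sleep at priority PLOCK, wait message "exmplk", no timeout */
	lockinit(&example_lock, PLOCK, "exmplk", 0, 0);
}
#endif	/* example only */
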
    139           1.1      fvdl /*
    140           1.1      fvdl  * Determine the status of a lock.
    141           1.1      fvdl  */
    142           1.1      fvdl int
    143           1.1      fvdl lockstatus(lkp)
    144           1.1      fvdl 	struct lock *lkp;
    145           1.1      fvdl {
    146           1.1      fvdl 	int lock_type = 0;
    147           1.1      fvdl 
    148           1.1      fvdl 	simple_lock(&lkp->lk_interlock);
    149           1.1      fvdl 	if (lkp->lk_exclusivecount != 0)
    150           1.1      fvdl 		lock_type = LK_EXCLUSIVE;
    151           1.1      fvdl 	else if (lkp->lk_sharecount != 0)
    152           1.1      fvdl 		lock_type = LK_SHARED;
    153           1.1      fvdl 	simple_unlock(&lkp->lk_interlock);
    154           1.1      fvdl 	return (lock_type);
    155           1.1      fvdl }
    156           1.1      fvdl 
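/*
 * Illustrative sketch, not part of the original source: lockstatus()
 * reports only whether the lock is currently held shared or exclusive,
 * not whether the caller is the holder.
 */
#if 0	/* example only */
void
example_lock_report(lkp)
	struct lock *lkp;
{

	switch (lockstatus(lkp)) {
	case LK_EXCLUSIVE:
		printf("lock is held exclusively\n");
		break;
	case LK_SHARED:
		printf("lock is held shared\n");
		break;
	default:
		printf("lock is not held\n");
		break;
	}
}
#endif	/* example only */
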
    157           1.1      fvdl /*
    158           1.1      fvdl  * Set, change, or release a lock.
    159           1.1      fvdl  *
    160           1.1      fvdl  * Shared requests increment the shared count. Exclusive requests set the
    161           1.1      fvdl  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
    162           1.1      fvdl  * accepted shared locks and shared-to-exclusive upgrades to go away.
    163           1.1      fvdl  */
    164           1.1      fvdl int
    165           1.6      fvdl lockmgr(lkp, flags, interlkp)
    166           1.1      fvdl 	__volatile struct lock *lkp;
    167           1.1      fvdl 	u_int flags;
    168           1.1      fvdl 	struct simplelock *interlkp;
    169           1.1      fvdl {
    170           1.1      fvdl 	int error;
    171           1.1      fvdl 	pid_t pid;
    172           1.1      fvdl 	int extflags;
    173           1.6      fvdl 	struct proc *p = curproc;
    174           1.1      fvdl 
    175           1.1      fvdl 	error = 0;
    176           1.1      fvdl 	if (p)
    177           1.1      fvdl 		pid = p->p_pid;
    178           1.1      fvdl 	else
    179           1.1      fvdl 		pid = LK_KERNPROC;
    180           1.1      fvdl 	simple_lock(&lkp->lk_interlock);
    181           1.1      fvdl 	if (flags & LK_INTERLOCK)
    182           1.1      fvdl 		simple_unlock(interlkp);
    183           1.1      fvdl 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
    184           1.1      fvdl #ifdef DIAGNOSTIC
    185           1.1      fvdl 	/*
    186           1.1      fvdl 	 * Once a lock has drained, the LK_DRAINING flag is set and an
    187           1.1      fvdl 	 * exclusive lock is returned. The only valid operation thereafter
    188           1.1      fvdl 	 * is a single release of that exclusive lock. This final release
    189           1.1      fvdl 	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
    190           1.1      fvdl 	 * further requests of any sort will result in a panic. The bits
    191           1.1      fvdl 	 * selected for these two flags are chosen so that they will be set
    192           1.1      fvdl 	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
    193           1.1      fvdl 	 * The final release is permitted to give a new lease on life to
    194           1.1      fvdl 	 * the lock by specifying LK_REENABLE.
    195           1.1      fvdl 	 */
    196           1.1      fvdl 	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
    197           1.1      fvdl 		if (lkp->lk_flags & LK_DRAINED)
    198           1.1      fvdl 			panic("lockmgr: using decommissioned lock");
    199           1.1      fvdl 		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
    200           1.1      fvdl 		    lkp->lk_lockholder != pid)
    201           1.1      fvdl 			panic("lockmgr: non-release on draining lock: %d\n",
    202           1.1      fvdl 			    flags & LK_TYPE_MASK);
    203           1.1      fvdl 		lkp->lk_flags &= ~LK_DRAINING;
    204           1.1      fvdl 		if ((flags & LK_REENABLE) == 0)
    205           1.1      fvdl 			lkp->lk_flags |= LK_DRAINED;
    206           1.1      fvdl 	}
     207           1.1      fvdl #endif /* DIAGNOSTIC */
    208           1.1      fvdl 
    209           1.1      fvdl 	switch (flags & LK_TYPE_MASK) {
    210           1.1      fvdl 
    211           1.1      fvdl 	case LK_SHARED:
    212           1.1      fvdl 		if (lkp->lk_lockholder != pid) {
    213           1.1      fvdl 			/*
    214           1.1      fvdl 			 * If just polling, check to see if we will block.
    215           1.1      fvdl 			 */
    216           1.1      fvdl 			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    217           1.1      fvdl 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
    218           1.1      fvdl 				error = EBUSY;
    219           1.1      fvdl 				break;
    220           1.1      fvdl 			}
    221           1.1      fvdl 			/*
    222           1.1      fvdl 			 * Wait for exclusive locks and upgrades to clear.
    223           1.1      fvdl 			 */
    224           1.1      fvdl 			ACQUIRE(lkp, error, extflags, lkp->lk_flags &
    225           1.1      fvdl 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
    226           1.1      fvdl 			if (error)
    227           1.1      fvdl 				break;
    228           1.1      fvdl 			lkp->lk_sharecount++;
    229           1.1      fvdl 			COUNT(p, 1);
    230           1.1      fvdl 			break;
    231           1.1      fvdl 		}
    232           1.1      fvdl 		/*
    233           1.1      fvdl 		 * We hold an exclusive lock, so downgrade it to shared.
    234           1.1      fvdl 		 * An alternative would be to fail with EDEADLK.
    235           1.1      fvdl 		 */
    236           1.1      fvdl 		lkp->lk_sharecount++;
    237           1.1      fvdl 		COUNT(p, 1);
    238           1.1      fvdl 		/* fall into downgrade */
    239           1.1      fvdl 
    240           1.1      fvdl 	case LK_DOWNGRADE:
    241           1.1      fvdl 		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
    242           1.1      fvdl 			panic("lockmgr: not holding exclusive lock");
    243           1.1      fvdl 		lkp->lk_sharecount += lkp->lk_exclusivecount;
    244           1.1      fvdl 		lkp->lk_exclusivecount = 0;
    245          1.15      fvdl 		lkp->lk_recurselevel = 0;
    246           1.1      fvdl 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    247           1.1      fvdl 		lkp->lk_lockholder = LK_NOPROC;
    248           1.1      fvdl 		if (lkp->lk_waitcount)
    249           1.1      fvdl 			wakeup((void *)lkp);
    250           1.1      fvdl 		break;
    251           1.1      fvdl 
    252           1.1      fvdl 	case LK_EXCLUPGRADE:
    253           1.1      fvdl 		/*
    254           1.1      fvdl 		 * If another process is ahead of us to get an upgrade,
    255           1.1      fvdl 		 * then we want to fail rather than have an intervening
    256           1.1      fvdl 		 * exclusive access.
    257           1.1      fvdl 		 */
    258           1.1      fvdl 		if (lkp->lk_flags & LK_WANT_UPGRADE) {
    259           1.1      fvdl 			lkp->lk_sharecount--;
    260           1.1      fvdl 			COUNT(p, -1);
    261           1.1      fvdl 			error = EBUSY;
    262           1.1      fvdl 			break;
    263           1.1      fvdl 		}
    264           1.1      fvdl 		/* fall into normal upgrade */
    265           1.1      fvdl 
    266           1.1      fvdl 	case LK_UPGRADE:
    267           1.1      fvdl 		/*
    268           1.1      fvdl 		 * Upgrade a shared lock to an exclusive one. If another
    269           1.1      fvdl 		 * shared lock has already requested an upgrade to an
    270           1.1      fvdl 		 * exclusive lock, our shared lock is released and an
    271           1.1      fvdl 		 * exclusive lock is requested (which will be granted
    272           1.1      fvdl 		 * after the upgrade). If we return an error, the file
    273           1.1      fvdl 		 * will always be unlocked.
    274           1.1      fvdl 		 */
    275           1.1      fvdl 		if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
    276           1.1      fvdl 			panic("lockmgr: upgrade exclusive lock");
    277           1.1      fvdl 		lkp->lk_sharecount--;
    278           1.1      fvdl 		COUNT(p, -1);
    279           1.1      fvdl 		/*
    280           1.1      fvdl 		 * If we are just polling, check to see if we will block.
    281           1.1      fvdl 		 */
    282           1.1      fvdl 		if ((extflags & LK_NOWAIT) &&
    283           1.1      fvdl 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
    284           1.1      fvdl 		     lkp->lk_sharecount > 1)) {
    285           1.1      fvdl 			error = EBUSY;
    286           1.1      fvdl 			break;
    287           1.1      fvdl 		}
    288           1.1      fvdl 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
    289           1.1      fvdl 			/*
     290           1.1      fvdl 			 * We are the first shared lock to request an upgrade, so
    291           1.1      fvdl 			 * request upgrade and wait for the shared count to
    292           1.1      fvdl 			 * drop to zero, then take exclusive lock.
    293           1.1      fvdl 			 */
    294           1.1      fvdl 			lkp->lk_flags |= LK_WANT_UPGRADE;
    295           1.1      fvdl 			ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
    296           1.1      fvdl 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
    297           1.1      fvdl 			if (error)
    298           1.1      fvdl 				break;
    299           1.1      fvdl 			lkp->lk_flags |= LK_HAVE_EXCL;
    300           1.1      fvdl 			lkp->lk_lockholder = pid;
    301           1.1      fvdl 			if (lkp->lk_exclusivecount != 0)
    302           1.1      fvdl 				panic("lockmgr: non-zero exclusive count");
    303           1.1      fvdl 			lkp->lk_exclusivecount = 1;
    304          1.15      fvdl 			if (extflags & LK_SETRECURSE)
    305          1.15      fvdl 				lkp->lk_recurselevel = 1;
    306           1.1      fvdl 			COUNT(p, 1);
    307           1.1      fvdl 			break;
    308           1.1      fvdl 		}
    309           1.1      fvdl 		/*
    310           1.1      fvdl 		 * Someone else has requested upgrade. Release our shared
    311           1.1      fvdl 		 * lock, awaken upgrade requestor if we are the last shared
    312           1.1      fvdl 		 * lock, then request an exclusive lock.
    313           1.1      fvdl 		 */
    314           1.1      fvdl 		if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
    315           1.1      fvdl 			wakeup((void *)lkp);
    316           1.1      fvdl 		/* fall into exclusive request */
    317           1.1      fvdl 
    318           1.1      fvdl 	case LK_EXCLUSIVE:
    319           1.1      fvdl 		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
    320           1.1      fvdl 			/*
    321           1.1      fvdl 			 *	Recursive lock.
    322           1.1      fvdl 			 */
    323          1.15      fvdl 			if ((extflags & LK_CANRECURSE) == 0 &&
    324          1.16  sommerfe 			     lkp->lk_recurselevel == 0) {
    325          1.16  sommerfe 				if (extflags & LK_RECURSEFAIL) {
    326          1.16  sommerfe 					error = EDEADLK;
    327          1.16  sommerfe 					break;
    328          1.16  sommerfe 				} else
    329          1.16  sommerfe 					panic("lockmgr: locking against myself");
    330          1.16  sommerfe 			}
    331           1.1      fvdl 			lkp->lk_exclusivecount++;
    332          1.15      fvdl 			if (extflags & LK_SETRECURSE &&
    333          1.15      fvdl 			    lkp->lk_recurselevel == 0)
    334          1.15      fvdl 				lkp->lk_recurselevel = lkp->lk_exclusivecount;
    335           1.1      fvdl 			COUNT(p, 1);
    336           1.1      fvdl 			break;
    337           1.1      fvdl 		}
    338           1.1      fvdl 		/*
    339           1.1      fvdl 		 * If we are just polling, check to see if we will sleep.
    340           1.1      fvdl 		 */
    341           1.1      fvdl 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
    342           1.1      fvdl 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    343           1.1      fvdl 		     lkp->lk_sharecount != 0)) {
    344           1.1      fvdl 			error = EBUSY;
    345           1.1      fvdl 			break;
    346           1.1      fvdl 		}
    347           1.1      fvdl 		/*
    348           1.1      fvdl 		 * Try to acquire the want_exclusive flag.
    349           1.1      fvdl 		 */
    350           1.1      fvdl 		ACQUIRE(lkp, error, extflags, lkp->lk_flags &
    351           1.1      fvdl 		    (LK_HAVE_EXCL | LK_WANT_EXCL));
    352           1.1      fvdl 		if (error)
    353           1.1      fvdl 			break;
    354           1.1      fvdl 		lkp->lk_flags |= LK_WANT_EXCL;
    355           1.1      fvdl 		/*
    356           1.1      fvdl 		 * Wait for shared locks and upgrades to finish.
    357           1.1      fvdl 		 */
    358           1.1      fvdl 		ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
    359           1.1      fvdl 		       (lkp->lk_flags & LK_WANT_UPGRADE));
    360           1.1      fvdl 		lkp->lk_flags &= ~LK_WANT_EXCL;
    361           1.1      fvdl 		if (error)
    362           1.1      fvdl 			break;
    363           1.1      fvdl 		lkp->lk_flags |= LK_HAVE_EXCL;
    364           1.1      fvdl 		lkp->lk_lockholder = pid;
    365           1.1      fvdl 		if (lkp->lk_exclusivecount != 0)
    366           1.1      fvdl 			panic("lockmgr: non-zero exclusive count");
    367           1.1      fvdl 		lkp->lk_exclusivecount = 1;
    368          1.15      fvdl 		if (extflags & LK_SETRECURSE)
    369          1.15      fvdl 			lkp->lk_recurselevel = 1;
    370           1.1      fvdl 		COUNT(p, 1);
    371           1.1      fvdl 		break;
    372           1.1      fvdl 
    373           1.1      fvdl 	case LK_RELEASE:
    374           1.1      fvdl 		if (lkp->lk_exclusivecount != 0) {
    375           1.1      fvdl 			if (pid != lkp->lk_lockholder)
    376          1.13    bouyer 				panic("lockmgr: pid %d, not exclusive lock "
    377          1.13    bouyer 				    "holder %d unlocking", pid,
    378           1.1      fvdl 				    lkp->lk_lockholder);
    379          1.15      fvdl 			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
    380          1.15      fvdl 				lkp->lk_recurselevel = 0;
    381           1.1      fvdl 			lkp->lk_exclusivecount--;
    382           1.1      fvdl 			COUNT(p, -1);
    383           1.1      fvdl 			if (lkp->lk_exclusivecount == 0) {
    384           1.1      fvdl 				lkp->lk_flags &= ~LK_HAVE_EXCL;
    385           1.1      fvdl 				lkp->lk_lockholder = LK_NOPROC;
    386           1.1      fvdl 			}
    387           1.1      fvdl 		} else if (lkp->lk_sharecount != 0) {
    388           1.1      fvdl 			lkp->lk_sharecount--;
    389           1.1      fvdl 			COUNT(p, -1);
    390           1.1      fvdl 		}
    391           1.1      fvdl 		if (lkp->lk_waitcount)
    392           1.1      fvdl 			wakeup((void *)lkp);
    393           1.1      fvdl 		break;
    394           1.1      fvdl 
    395           1.1      fvdl 	case LK_DRAIN:
    396           1.1      fvdl 		/*
    397           1.1      fvdl 		 * Check that we do not already hold the lock, as it can
    398           1.1      fvdl 		 * never drain if we do. Unfortunately, we have no way to
    399           1.1      fvdl 		 * check for holding a shared lock, but at least we can
    400           1.1      fvdl 		 * check for an exclusive one.
    401           1.1      fvdl 		 */
    402           1.1      fvdl 		if (lkp->lk_lockholder == pid)
    403           1.1      fvdl 			panic("lockmgr: draining against myself");
    404           1.1      fvdl 		/*
    405           1.1      fvdl 		 * If we are just polling, check to see if we will sleep.
    406           1.1      fvdl 		 */
    407           1.1      fvdl 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
    408           1.1      fvdl 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    409           1.1      fvdl 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
    410           1.1      fvdl 			error = EBUSY;
    411           1.1      fvdl 			break;
    412           1.1      fvdl 		}
    413           1.1      fvdl 		PAUSE(lkp, ((lkp->lk_flags &
    414           1.1      fvdl 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    415           1.1      fvdl 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
    416           1.1      fvdl 		for (error = 0; ((lkp->lk_flags &
    417           1.1      fvdl 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    418           1.1      fvdl 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
    419           1.1      fvdl 			lkp->lk_flags |= LK_WAITDRAIN;
    420           1.1      fvdl 			simple_unlock(&lkp->lk_interlock);
    421           1.2      fvdl 			if ((error = tsleep((void *)&lkp->lk_flags,
    422           1.2      fvdl 			    lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo)))
    423           1.1      fvdl 				return (error);
    424           1.1      fvdl 			if ((extflags) & LK_SLEEPFAIL)
    425           1.1      fvdl 				return (ENOLCK);
    426           1.1      fvdl 			simple_lock(&lkp->lk_interlock);
    427           1.1      fvdl 		}
    428           1.1      fvdl 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
    429           1.1      fvdl 		lkp->lk_lockholder = pid;
    430           1.1      fvdl 		lkp->lk_exclusivecount = 1;
    431          1.15      fvdl 		/* XXX unlikely that we'd want this */
    432          1.15      fvdl 		if (extflags & LK_SETRECURSE)
    433          1.15      fvdl 			lkp->lk_recurselevel = 1;
    434           1.1      fvdl 		COUNT(p, 1);
    435           1.1      fvdl 		break;
    436           1.1      fvdl 
    437           1.1      fvdl 	default:
    438           1.1      fvdl 		simple_unlock(&lkp->lk_interlock);
    439           1.1      fvdl 		panic("lockmgr: unknown locktype request %d",
    440           1.1      fvdl 		    flags & LK_TYPE_MASK);
    441           1.1      fvdl 		/* NOTREACHED */
    442           1.1      fvdl 	}
    443           1.1      fvdl 	if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
    444           1.1      fvdl 	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
    445           1.1      fvdl 	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
    446           1.1      fvdl 		lkp->lk_flags &= ~LK_WAITDRAIN;
    447           1.1      fvdl 		wakeup((void *)&lkp->lk_flags);
    448           1.1      fvdl 	}
    449           1.1      fvdl 	simple_unlock(&lkp->lk_interlock);
    450           1.1      fvdl 	return (error);
    451           1.1      fvdl }
    452           1.1      fvdl 
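/*
 * Illustrative sketches, not part of the original source: common
 * lockmgr() calling patterns.  "example_lock" is a hypothetical lock
 * assumed to have been set up elsewhere with lockinit().
 */
#if 0	/* example only */
extern struct lock example_lock;

int
example_lockmgr_usage()
{
	int error;

	/* Shared (read) lock, released when the read is done. */
	error = lockmgr(&example_lock, LK_SHARED, NULL);
	if (error)
		return (error);
	/* ... read the protected data ... */
	(void) lockmgr(&example_lock, LK_RELEASE, NULL);

	/* Exclusive lock, failing with EBUSY rather than sleeping. */
	error = lockmgr(&example_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL);
	if (error)
		return (error);
	/* ... modify the protected data ... */

	/*
	 * Downgrade to a shared lock once the modifications are done;
	 * readers may now come in.  A later LK_UPGRADE would try to go
	 * back to exclusive; if that fails, the lock is left unlocked.
	 */
	(void) lockmgr(&example_lock, LK_DOWNGRADE, NULL);
	(void) lockmgr(&example_lock, LK_RELEASE, NULL);

	/*
	 * Drain the lock before tearing down the object it protects:
	 * LK_DRAIN waits for every other holder and waiter to go away
	 * and returns with the lock held exclusively.  The only valid
	 * operation after that is a single LK_RELEASE (unless
	 * LK_REENABLE is passed with it), which decommissions the lock.
	 */
	error = lockmgr(&example_lock, LK_DRAIN, NULL);
	if (error)
		return (error);
	(void) lockmgr(&example_lock, LK_RELEASE, NULL);
	return (0);
}
#endif	/* example only */
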
    453           1.1      fvdl /*
     454           1.1      fvdl  * Print out information about the state of a lock. Used by VOP_PRINT
     455           1.1      fvdl  * routines to display the status of contained locks.
    456           1.1      fvdl  */
    457           1.2      fvdl void
    458           1.1      fvdl lockmgr_printinfo(lkp)
    459           1.1      fvdl 	struct lock *lkp;
    460           1.1      fvdl {
    461           1.1      fvdl 
    462           1.1      fvdl 	if (lkp->lk_sharecount)
    463           1.1      fvdl 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
    464           1.1      fvdl 		    lkp->lk_sharecount);
    465           1.1      fvdl 	else if (lkp->lk_flags & LK_HAVE_EXCL)
    466           1.1      fvdl 		printf(" lock type %s: EXCL (count %d) by pid %d",
    467           1.1      fvdl 		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
    468           1.1      fvdl 	if (lkp->lk_waitcount > 0)
    469           1.1      fvdl 		printf(" with %d pending", lkp->lk_waitcount);
    470           1.1      fvdl }
    471           1.1      fvdl 
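/*
 * Illustrative sketch, not part of the original source: a VOP_PRINT
 * style routine might report its lock state this way.  The softc
 * structure and its sc_lock member are hypothetical.
 */
#if 0	/* example only */
void
example_print(sc)
	struct example_softc *sc;
{

	printf("example_softc %p", sc);
	lockmgr_printinfo(&sc->sc_lock);
	printf("\n");
}
#endif	/* example only */
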
    472           1.9   thorpej #if defined(LOCKDEBUG) && !defined(MULTIPROCESSOR)
    473           1.1      fvdl #include <sys/kernel.h>
    474           1.1      fvdl #include <vm/vm.h>
    475           1.1      fvdl #include <sys/sysctl.h>
    476           1.1      fvdl int lockpausetime = 0;
    477           1.1      fvdl struct ctldebug debug2 = { "lockpausetime", &lockpausetime };
    478           1.1      fvdl int simplelockrecurse;
    479          1.12       chs LIST_HEAD(slocklist, simplelock) slockdebuglist;
    480  1.16.2.1.2.1       chs int simple_lock_debugger = 0;
    481          1.12       chs 
    482           1.1      fvdl /*
    483           1.1      fvdl  * Simple lock functions so that the debugger can see from whence
    484           1.1      fvdl  * they are being called.
    485           1.1      fvdl  */
    486           1.1      fvdl void
    487           1.1      fvdl simple_lock_init(alp)
    488           1.1      fvdl 	struct simplelock *alp;
    489           1.1      fvdl {
    490  1.16.2.1.2.1       chs 	alp->lock_data = SLOCK_UNLOCKED;
    491           1.5       chs 	alp->lock_file = NULL;
    492           1.5       chs 	alp->lock_line = 0;
    493           1.5       chs 	alp->unlock_file = NULL;
    494           1.5       chs 	alp->unlock_line = 0;
    495          1.10   thorpej 	alp->lock_holder = 0;
    496           1.1      fvdl }
    497           1.1      fvdl 
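/*
 * Illustrative sketch, not part of the original source: typical use of
 * a simple lock.  Under LOCKDEBUG the simple_lock()/simple_unlock()
 * macros in <sys/lock.h> expand to the _simple_lock()/_simple_unlock()
 * routines below, passing __FILE__ and __LINE__ so the debugging code
 * can report where the lock was last taken and released.
 */
#if 0	/* example only */
struct simplelock example_slock;

void
example_simplelock_usage()
{

	simple_lock_init(&example_slock);

	simple_lock(&example_slock);
	/* ... short critical section touching the protected data ... */
	simple_unlock(&example_slock);
}
#endif	/* example only */
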
    498           1.1      fvdl void
    499           1.1      fvdl _simple_lock(alp, id, l)
    500           1.1      fvdl 	__volatile struct simplelock *alp;
    501           1.1      fvdl 	const char *id;
    502           1.1      fvdl 	int l;
    503           1.1      fvdl {
    504          1.12       chs 	int s;
    505          1.12       chs 
    506           1.1      fvdl 	if (simplelockrecurse)
    507           1.1      fvdl 		return;
    508  1.16.2.1.2.2       chs 
    509  1.16.2.1.2.2       chs 	s = splhigh();
    510  1.16.2.1.2.1       chs 	if (alp->lock_data != SLOCK_UNLOCKED) {
    511           1.5       chs 		printf("simple_lock: lock held\n");
    512           1.5       chs 		printf("currently at: %s:%d\n", id, l);
    513           1.5       chs 		printf("last locked: %s:%d\n",
    514           1.5       chs 		       alp->lock_file, alp->lock_line);
    515           1.5       chs 		printf("last unlocked: %s:%d\n",
    516           1.5       chs 		       alp->unlock_file, alp->unlock_line);
    517           1.1      fvdl 		if (lockpausetime == -1)
    518           1.5       chs 			panic("simple_lock: lock held");
    519           1.1      fvdl 		if (lockpausetime == 1) {
    520           1.2      fvdl #ifdef BACKTRACE
    521           1.1      fvdl 			BACKTRACE(curproc);
    522           1.2      fvdl #endif
    523  1.16.2.1.2.1       chs 		}
    524  1.16.2.1.2.1       chs 		if (simple_lock_debugger) {
    525  1.16.2.1.2.1       chs 			Debugger();
    526           1.1      fvdl 		}
    527  1.16.2.1.2.2       chs 
    528  1.16.2.1.2.2       chs 		splx(s);
    529          1.12       chs 		return;
    530           1.1      fvdl 	}
    531          1.12       chs 
    532          1.12       chs 	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
    533  1.16.2.1.2.1       chs 	alp->lock_data = SLOCK_LOCKED;
    534           1.5       chs 	alp->lock_file = id;
    535           1.5       chs 	alp->lock_line = l;
    536  1.16.2.1.2.2       chs 
    537           1.1      fvdl 	if (curproc)
    538           1.1      fvdl 		curproc->p_simple_locks++;
    539  1.16.2.1.2.2       chs 
    540  1.16.2.1.2.2       chs 	splx(s);
    541           1.1      fvdl }
    542           1.1      fvdl 
    543           1.1      fvdl int
    544           1.1      fvdl _simple_lock_try(alp, id, l)
    545           1.1      fvdl 	__volatile struct simplelock *alp;
    546           1.1      fvdl 	const char *id;
    547           1.1      fvdl 	int l;
    548           1.1      fvdl {
    549          1.12       chs 	int s;
    550           1.1      fvdl 
    551  1.16.2.1.2.2       chs 	if (simplelockrecurse)
    552  1.16.2.1.2.2       chs 		return (1);
    553  1.16.2.1.2.2       chs 
    554  1.16.2.1.2.2       chs 	s = splhigh();
    555  1.16.2.1.2.1       chs 	if (alp->lock_data != SLOCK_UNLOCKED) {
    556  1.16.2.1.2.1       chs 		printf("simple_lock_try: lock held\n");
    557  1.16.2.1.2.1       chs 		printf("currently at: %s:%d\n", id, l);
    558  1.16.2.1.2.1       chs 		printf("last locked: %s:%d\n",
    559  1.16.2.1.2.1       chs 		       alp->lock_file, alp->lock_line);
    560  1.16.2.1.2.1       chs 		printf("last unlocked: %s:%d\n",
    561  1.16.2.1.2.1       chs 		       alp->unlock_file, alp->unlock_line);
    562  1.16.2.1.2.1       chs 		if (lockpausetime == -1)
    563  1.16.2.1.2.1       chs 			panic("simple_lock_try: lock held");
    564  1.16.2.1.2.1       chs 		if (lockpausetime == 1) {
    565  1.16.2.1.2.1       chs #ifdef BACKTRACE
    566  1.16.2.1.2.1       chs 			BACKTRACE(curproc);
    567  1.16.2.1.2.1       chs #endif
    568  1.16.2.1.2.1       chs 		}
    569  1.16.2.1.2.1       chs 		if (simple_lock_debugger) {
    570  1.16.2.1.2.1       chs 			Debugger();
    571  1.16.2.1.2.1       chs 		}
    572  1.16.2.1.2.2       chs 
    573  1.16.2.1.2.2       chs 		splx(s);
    574           1.1      fvdl 		return (0);
    575  1.16.2.1.2.1       chs 	}
    576  1.16.2.1.2.2       chs 
    577  1.16.2.1.2.2       chs 	LIST_INSERT_HEAD(&slockdebuglist, (struct simplelock *)alp, list);
    578  1.16.2.1.2.1       chs 	alp->lock_data = SLOCK_LOCKED;
    579           1.5       chs 	alp->lock_file = id;
    580           1.5       chs 	alp->lock_line = l;
    581          1.12       chs 
    582           1.1      fvdl 	if (curproc)
    583           1.1      fvdl 		curproc->p_simple_locks++;
    584  1.16.2.1.2.2       chs 
    585  1.16.2.1.2.2       chs 	splx(s);
    586           1.1      fvdl 	return (1);
    587           1.1      fvdl }
    588           1.1      fvdl 
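/*
 * Illustrative sketch, not part of the original source: simple_lock_try()
 * is for callers that must not sleep or spin waiting for the lock; if
 * the lock is already held they back off and try again later.
 */
#if 0	/* example only */
int
example_try_usage(alp)
	struct simplelock *alp;
{

	if (simple_lock_try(alp) == 0)
		return (EBUSY);		/* caller will retry later */
	/* ... short critical section ... */
	simple_unlock(alp);
	return (0);
}
#endif	/* example only */
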
    589           1.1      fvdl void
    590           1.1      fvdl _simple_unlock(alp, id, l)
    591           1.1      fvdl 	__volatile struct simplelock *alp;
    592           1.1      fvdl 	const char *id;
    593           1.1      fvdl 	int l;
    594           1.1      fvdl {
    595          1.12       chs 	int s;
    596           1.1      fvdl 
    597           1.1      fvdl 	if (simplelockrecurse)
    598           1.1      fvdl 		return;
    599  1.16.2.1.2.2       chs 
    600  1.16.2.1.2.2       chs 	s = splhigh();
    601  1.16.2.1.2.1       chs 	if (alp->lock_data == SLOCK_UNLOCKED) {
    602           1.5       chs 		printf("simple_unlock: lock not held\n");
    603           1.5       chs 		printf("currently at: %s:%d\n", id, l);
    604           1.5       chs 		printf("last locked: %s:%d\n",
    605           1.5       chs 		       alp->lock_file, alp->lock_line);
    606           1.5       chs 		printf("last unlocked: %s:%d\n",
    607           1.5       chs 		       alp->unlock_file, alp->unlock_line);
    608           1.1      fvdl 		if (lockpausetime == -1)
    609           1.5       chs 			panic("simple_unlock: lock not held");
    610           1.1      fvdl 		if (lockpausetime == 1) {
    611           1.2      fvdl #ifdef BACKTRACE
    612           1.1      fvdl 			BACKTRACE(curproc);
    613           1.2      fvdl #endif
    614  1.16.2.1.2.1       chs 		}
    615  1.16.2.1.2.1       chs 		if (simple_lock_debugger) {
    616  1.16.2.1.2.1       chs 			Debugger();
    617           1.1      fvdl 		}
    618  1.16.2.1.2.2       chs 		splx(s);
    619          1.12       chs 		return;
    620           1.1      fvdl 	}
    621          1.12       chs 
    622          1.12       chs 	LIST_REMOVE(alp, list);
    623          1.12       chs 	alp->list.le_next = NULL;
    624          1.12       chs 	alp->list.le_prev = NULL;
    625  1.16.2.1.2.1       chs 	alp->lock_data = SLOCK_UNLOCKED;
    626           1.5       chs 	alp->unlock_file = id;
    627           1.5       chs 	alp->unlock_line = l;
    628  1.16.2.1.2.2       chs 
    629           1.1      fvdl 	if (curproc)
    630           1.1      fvdl 		curproc->p_simple_locks--;
    631  1.16.2.1.2.2       chs 
    632  1.16.2.1.2.2       chs 	splx(s);
    633          1.12       chs }
    634  1.16.2.1.2.1       chs 
    635  1.16.2.1.2.1       chs void
    636  1.16.2.1.2.1       chs _simple_lock_assert(alp, value, id, l)
    637  1.16.2.1.2.1       chs 	__volatile struct simplelock *alp;
    638  1.16.2.1.2.1       chs 	int value;
    639  1.16.2.1.2.1       chs 	const char *id;
    640  1.16.2.1.2.1       chs 	int l;
    641  1.16.2.1.2.1       chs {
    642  1.16.2.1.2.1       chs 	if (alp->lock_data != value) {
    643  1.16.2.1.2.1       chs 		panic("lock %p: value %d != expected %d at %s:%d",
    644  1.16.2.1.2.1       chs 		      alp, alp->lock_data, value, id, l);
    645  1.16.2.1.2.1       chs 	}
    646  1.16.2.1.2.1       chs }
    647  1.16.2.1.2.1       chs 
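/*
 * Illustrative sketch, not part of the original source: a routine that
 * requires its caller to hold a simple lock can check that with the
 * assertion above.  The call is shown directly through
 * _simple_lock_assert(); the wrapper macro spelling in <sys/lock.h>
 * may differ.
 */
#if 0	/* example only */
void
example_needs_lock(alp)
	__volatile struct simplelock *alp;
{

	_simple_lock_assert(alp, SLOCK_LOCKED, __FILE__, __LINE__);
	/* ... safe to touch the data protected by alp ... */
}
#endif	/* example only */
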
    648          1.12       chs 
    649          1.12       chs void
    650          1.12       chs simple_lock_dump()
    651          1.12       chs {
    652          1.12       chs 	struct simplelock *alp;
    653          1.12       chs 	int s;
    654          1.12       chs 
    655          1.12       chs 	s = splhigh();
    656          1.12       chs 	printf("all simple locks:\n");
    657          1.12       chs 	for (alp = LIST_FIRST(&slockdebuglist);
    658          1.12       chs 	     alp != NULL;
    659          1.12       chs 	     alp = LIST_NEXT(alp, list)) {
    660          1.12       chs 		printf("%p  %s:%d\n", alp, alp->lock_file, alp->lock_line);
    661          1.12       chs 	}
    662          1.12       chs 	splx(s);
    663          1.12       chs }
    664          1.12       chs 
    665          1.12       chs void
    666          1.12       chs simple_lock_freecheck(start, end)
     667          1.12       chs 	void *start, *end;
    668          1.12       chs {
    669          1.12       chs 	struct simplelock *alp;
    670          1.12       chs 	int s;
    671          1.12       chs 
    672          1.12       chs 	s = splhigh();
    673          1.12       chs 	for (alp = LIST_FIRST(&slockdebuglist);
    674          1.12       chs 	     alp != NULL;
    675          1.12       chs 	     alp = LIST_NEXT(alp, list)) {
    676          1.12       chs 		if ((void *)alp >= start && (void *)alp < end) {
    677          1.14       chs 			printf("freeing simple_lock %p %s:%d\n",
    678          1.14       chs 			       alp, alp->lock_file, alp->lock_line);
    679          1.12       chs #ifdef DDB
    680          1.12       chs 			Debugger();
    681          1.12       chs #endif
    682          1.12       chs 		}
    683          1.12       chs 	}
    684          1.12       chs 	splx(s);
    685           1.1      fvdl }
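
/*
 * Illustrative sketch, not part of the original source: code about to
 * recycle a range of kernel memory could call simple_lock_freecheck()
 * to catch simple locks that are still held (and hence still on
 * slockdebuglist) within that range.  The free routine named here is
 * hypothetical.
 */
#if 0	/* example only */
void
example_free_range(start, size)
	void *start;
	size_t size;
{

	simple_lock_freecheck(start, (void *)((char *)start + size));
	example_actually_free(start, size);
}
#endif	/* example only */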
    686           1.9   thorpej #endif /* LOCKDEBUG && ! MULTIPROCESSOR */
    687