kern_lock.c revision 1.115
      1  1.115        ad /*	$NetBSD: kern_lock.c,v 1.115 2007/06/15 20:59:38 ad Exp $	*/
      2   1.19   thorpej 
      3   1.19   thorpej /*-
      4  1.114        ad  * Copyright (c) 1999, 2000, 2006, 2007 The NetBSD Foundation, Inc.
      5   1.19   thorpej  * All rights reserved.
      6   1.19   thorpej  *
      7   1.19   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8   1.19   thorpej  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  1.105        ad  * NASA Ames Research Center, and by Andrew Doran.
     10   1.19   thorpej  *
     11   1.19   thorpej  * This code is derived from software contributed to The NetBSD Foundation
     12   1.19   thorpej  * by Ross Harvey.
     13   1.19   thorpej  *
     14   1.19   thorpej  * Redistribution and use in source and binary forms, with or without
     15   1.19   thorpej  * modification, are permitted provided that the following conditions
     16   1.19   thorpej  * are met:
     17   1.19   thorpej  * 1. Redistributions of source code must retain the above copyright
     18   1.19   thorpej  *    notice, this list of conditions and the following disclaimer.
     19   1.19   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     20   1.19   thorpej  *    notice, this list of conditions and the following disclaimer in the
     21   1.19   thorpej  *    documentation and/or other materials provided with the distribution.
     22   1.19   thorpej  * 3. All advertising materials mentioning features or use of this software
     23   1.19   thorpej  *    must display the following acknowledgement:
     24   1.19   thorpej  *	This product includes software developed by the NetBSD
     25   1.19   thorpej  *	Foundation, Inc. and its contributors.
     26   1.19   thorpej  * 4. Neither the name of The NetBSD Foundation nor the names of its
     27   1.19   thorpej  *    contributors may be used to endorse or promote products derived
     28   1.19   thorpej  *    from this software without specific prior written permission.
     29   1.19   thorpej  *
     30   1.19   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     31   1.19   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     32   1.19   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     33   1.19   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     34   1.19   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     35   1.19   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     36   1.19   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     37   1.19   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     38   1.19   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     39   1.19   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     40   1.19   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     41   1.19   thorpej  */
     42    1.2      fvdl 
     43   1.86     perry /*
     44    1.1      fvdl  * Copyright (c) 1995
     45    1.1      fvdl  *	The Regents of the University of California.  All rights reserved.
     46    1.1      fvdl  *
     47    1.1      fvdl  * This code contains ideas from software contributed to Berkeley by
     48    1.1      fvdl  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
     49    1.1      fvdl  * System project at Carnegie-Mellon University.
     50    1.1      fvdl  *
     51    1.1      fvdl  * Redistribution and use in source and binary forms, with or without
     52    1.1      fvdl  * modification, are permitted provided that the following conditions
     53    1.1      fvdl  * are met:
     54    1.1      fvdl  * 1. Redistributions of source code must retain the above copyright
     55    1.1      fvdl  *    notice, this list of conditions and the following disclaimer.
     56    1.1      fvdl  * 2. Redistributions in binary form must reproduce the above copyright
     57    1.1      fvdl  *    notice, this list of conditions and the following disclaimer in the
     58    1.1      fvdl  *    documentation and/or other materials provided with the distribution.
     59   1.72       agc  * 3. Neither the name of the University nor the names of its contributors
     60    1.1      fvdl  *    may be used to endorse or promote products derived from this software
     61    1.1      fvdl  *    without specific prior written permission.
     62    1.1      fvdl  *
     63    1.1      fvdl  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     64    1.1      fvdl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     65    1.1      fvdl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     66    1.1      fvdl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     67    1.1      fvdl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     68    1.1      fvdl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     69    1.1      fvdl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70    1.1      fvdl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     71    1.1      fvdl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     72    1.1      fvdl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     73    1.1      fvdl  * SUCH DAMAGE.
     74    1.1      fvdl  *
     75    1.1      fvdl  *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
     76    1.1      fvdl  */
     77   1.60     lukem 
     78   1.60     lukem #include <sys/cdefs.h>
     79  1.115        ad __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.115 2007/06/15 20:59:38 ad Exp $");
     80    1.7   thorpej 
     81   1.21   thorpej #include "opt_multiprocessor.h"
     82   1.18       chs #include "opt_ddb.h"
     83    1.1      fvdl 
     84  1.105        ad #define	__MUTEX_PRIVATE
     85  1.105        ad 
     86    1.1      fvdl #include <sys/param.h>
     87    1.1      fvdl #include <sys/proc.h>
     88    1.1      fvdl #include <sys/lock.h>
     89    1.2      fvdl #include <sys/systm.h>
     90  1.105        ad #include <sys/lockdebug.h>
     91  1.105        ad 
     92    1.1      fvdl #include <machine/cpu.h>
     93  1.110  christos #include <machine/stdarg.h>
     94    1.1      fvdl 
     95   1.98        ad #include <dev/lockstat.h>
     96   1.98        ad 
     97   1.25   thorpej #if defined(LOCKDEBUG)
     98   1.25   thorpej #include <sys/syslog.h>
     99   1.25   thorpej /*
     100   1.25   thorpej  * note that stdarg.h and the ansi style va_start macro are used for both
    101   1.25   thorpej  * ansi and traditional c compiles.
    102   1.25   thorpej  * XXX: this requires that stdarg.h define: va_alist and va_dcl
    103   1.25   thorpej  */
    104   1.25   thorpej #include <machine/stdarg.h>
    105   1.25   thorpej 
    106   1.36   thorpej void	lock_printf(const char *fmt, ...)
    107   1.37       eeh     __attribute__((__format__(__printf__,1,2)));
    108   1.25   thorpej 
    109  1.105        ad static int acquire(volatile struct lock **, int *, int, int, int, uintptr_t);
    110   1.73      yamt 
    111   1.57  sommerfe int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */
    112   1.55   thorpej 
    113   1.55   thorpej #ifdef DDB
    114   1.55   thorpej #include <ddb/ddbvar.h>
    115   1.55   thorpej #include <machine/db_machdep.h>
    116   1.55   thorpej #include <ddb/db_command.h>
    117   1.55   thorpej #include <ddb/db_interface.h>
    118   1.55   thorpej #endif
    119   1.85      yamt #endif /* defined(LOCKDEBUG) */
    120   1.85      yamt 
    121   1.85      yamt #if defined(MULTIPROCESSOR)
    122  1.105        ad int kernel_lock_id;
    123  1.115        ad __cpu_simple_lock_t kernel_lock;
    124   1.25   thorpej #endif
    125   1.25   thorpej 
    126    1.1      fvdl /*
    127    1.1      fvdl  * Locking primitives implementation.
    128   1.56       wiz  * Locks provide shared/exclusive synchronization.
    129    1.1      fvdl  */
    130    1.1      fvdl 
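/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the typical life cycle of one of these locks as seen by a
 * caller.  The lock, wait message and priority below are made-up
 * examples; real callers pass whatever suits their subsystem.
 */
#if 0
static struct lock example_lock;

static void
example_lock_lifecycle(void)
{

	/* Initialize once, before first use; sleepers wait at PINOD. */
	lockinit(&example_lock, PINOD, "exlock", 0, 0);

	/* Shared (reader) hold, then release. */
	(void) lockmgr(&example_lock, LK_SHARED, NULL);
	(void) lockmgr(&example_lock, LK_RELEASE, NULL);

	/* Exclusive (writer) hold, then release. */
	(void) lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
	(void) lockmgr(&example_lock, LK_RELEASE, NULL);
}
#endif
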
    131   1.21   thorpej #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
    132   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
    133   1.21   thorpej #define	COUNT_CPU(cpu_id, x)						\
    134   1.47  sommerfe 	curcpu()->ci_spin_locks += (x)
    135   1.21   thorpej #else
    136   1.21   thorpej u_long	spin_locks;
    137   1.21   thorpej #define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
    138   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
    139   1.21   thorpej 
    140   1.69   thorpej #define	COUNT(lkp, l, cpu_id, x)					\
    141   1.21   thorpej do {									\
    142   1.21   thorpej 	if ((lkp)->lk_flags & LK_SPIN)					\
    143   1.21   thorpej 		COUNT_CPU((cpu_id), (x));				\
    144   1.21   thorpej 	else								\
    145   1.69   thorpej 		(l)->l_locks += (x);					\
    146   1.30   thorpej } while (/*CONSTCOND*/0)
    147    1.1      fvdl #else
    148   1.22    mellon #define COUNT(lkp, p, cpu_id, x)
    149   1.48  sommerfe #define COUNT_CPU(cpu_id, x)
    150   1.21   thorpej #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
    151    1.1      fvdl 
    152   1.43   thorpej #define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
    153   1.40   thorpej do {									\
    154   1.43   thorpej 	if ((flags) & LK_SPIN)						\
    155  1.114        ad 		s = splhigh();						\
    156   1.40   thorpej 	simple_lock(&(lkp)->lk_interlock);				\
    157   1.66     perry } while (/*CONSTCOND*/ 0)
    158   1.40   thorpej 
    159   1.43   thorpej #define	INTERLOCK_RELEASE(lkp, flags, s)				\
    160   1.40   thorpej do {									\
    161   1.40   thorpej 	simple_unlock(&(lkp)->lk_interlock);				\
    162   1.52   thorpej 	if ((flags) & LK_SPIN)						\
    163   1.40   thorpej 		splx(s);						\
    164   1.66     perry } while (/*CONSTCOND*/ 0)
    165   1.40   thorpej 
    166   1.63       chs #ifdef DDB /* { */
    167   1.89       chs #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
    168   1.63       chs int simple_lock_debugger = 1;	/* more serious on MP */
    169   1.63       chs #else
    170   1.63       chs int simple_lock_debugger = 0;
    171   1.63       chs #endif
    172   1.93       erh #define	SLOCK_DEBUGGER()	if (simple_lock_debugger && db_onpanic) Debugger()
    173   1.63       chs #define	SLOCK_TRACE()							\
    174   1.63       chs 	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
    175  1.108   thorpej 	    true, 65535, "", lock_printf);
    176   1.63       chs #else
    177   1.63       chs #define	SLOCK_DEBUGGER()	/* nothing */
    178   1.63       chs #define	SLOCK_TRACE()		/* nothing */
    179   1.63       chs #endif /* } */
    180   1.63       chs 
    181   1.50   thorpej #if defined(LOCKDEBUG)
    182   1.50   thorpej #if defined(DDB)
    183   1.93       erh #define	SPINLOCK_SPINCHECK_DEBUGGER	if (db_onpanic) Debugger()
    184   1.50   thorpej #else
    185   1.50   thorpej #define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
    186   1.50   thorpej #endif
    187   1.50   thorpej 
    188   1.50   thorpej #define	SPINLOCK_SPINCHECK_DECL						\
    189   1.50   thorpej 	/* 32-bits of count -- wrap constitutes a "spinout" */		\
    190   1.50   thorpej 	uint32_t __spinc = 0
    191   1.50   thorpej 
    192   1.50   thorpej #define	SPINLOCK_SPINCHECK						\
    193   1.50   thorpej do {									\
    194   1.50   thorpej 	if (++__spinc == 0) {						\
    195   1.71        pk 		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
    196   1.50   thorpej 		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
    197   1.50   thorpej 		if (lkp->lk_exclusivecount)				\
    198   1.71        pk 			lock_printf("held by CPU %lu\n",		\
    199   1.50   thorpej 			    (u_long) lkp->lk_cpu);			\
    200   1.50   thorpej 		if (lkp->lk_lock_file)					\
    201   1.71        pk 			lock_printf("last locked at %s:%d\n",		\
    202   1.50   thorpej 			    lkp->lk_lock_file, lkp->lk_lock_line);	\
    203   1.50   thorpej 		if (lkp->lk_unlock_file)				\
    204   1.71        pk 			lock_printf("last unlocked at %s:%d\n",		\
    205   1.50   thorpej 			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
    206   1.63       chs 		SLOCK_TRACE();						\
    207   1.50   thorpej 		SPINLOCK_SPINCHECK_DEBUGGER;				\
    208   1.50   thorpej 	}								\
    209   1.66     perry } while (/*CONSTCOND*/ 0)
    210   1.50   thorpej #else
    211   1.50   thorpej #define	SPINLOCK_SPINCHECK_DECL			/* nothing */
    212   1.50   thorpej #define	SPINLOCK_SPINCHECK			/* nothing */
    213   1.50   thorpej #endif /* LOCKDEBUG && DDB */
    214   1.50   thorpej 
    215   1.98        ad #define	RETURN_ADDRESS		((uintptr_t)__builtin_return_address(0))
    216   1.98        ad 
    217    1.1      fvdl /*
    218    1.1      fvdl  * Acquire a resource.
    219    1.1      fvdl  */
    220   1.73      yamt static int
    221   1.91     perry acquire(volatile struct lock **lkpp, int *s, int extflags,
    222  1.102      yamt     int drain, int wanted, uintptr_t ra)
    223   1.73      yamt {
    224   1.73      yamt 	int error;
    225   1.91     perry 	volatile struct lock *lkp = *lkpp;
    226   1.98        ad 	LOCKSTAT_TIMER(slptime);
    227  1.105        ad 	LOCKSTAT_FLAG(lsflag);
    228   1.73      yamt 
    229   1.73      yamt 	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
    230   1.73      yamt 
    231   1.73      yamt 	if (extflags & LK_SPIN) {
    232   1.73      yamt 		int interlocked;
    233   1.73      yamt 
    234   1.73      yamt 		SPINLOCK_SPINCHECK_DECL;
    235   1.73      yamt 
    236   1.73      yamt 		if (!drain) {
    237   1.73      yamt 			lkp->lk_waitcount++;
    238   1.73      yamt 			lkp->lk_flags |= LK_WAIT_NONZERO;
    239   1.73      yamt 		}
    240   1.73      yamt 		for (interlocked = 1;;) {
    241   1.73      yamt 			SPINLOCK_SPINCHECK;
    242   1.73      yamt 			if ((lkp->lk_flags & wanted) != 0) {
    243   1.73      yamt 				if (interlocked) {
    244   1.74   hannken 					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
    245   1.73      yamt 					interlocked = 0;
    246   1.73      yamt 				}
    247   1.73      yamt 				SPINLOCK_SPIN_HOOK;
    248   1.73      yamt 			} else if (interlocked) {
    249   1.73      yamt 				break;
    250   1.73      yamt 			} else {
    251   1.74   hannken 				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
    252   1.73      yamt 				interlocked = 1;
    253   1.73      yamt 			}
    254   1.73      yamt 		}
    255   1.73      yamt 		if (!drain) {
    256   1.73      yamt 			lkp->lk_waitcount--;
    257   1.73      yamt 			if (lkp->lk_waitcount == 0)
    258   1.73      yamt 				lkp->lk_flags &= ~LK_WAIT_NONZERO;
    259   1.73      yamt 		}
    260   1.73      yamt 		KASSERT((lkp->lk_flags & wanted) == 0);
    261   1.73      yamt 		error = 0;	/* sanity */
    262   1.73      yamt 	} else {
    263  1.105        ad 		LOCKSTAT_ENTER(lsflag);
    264  1.105        ad 
    265   1.73      yamt 		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
    266   1.73      yamt 			if (drain)
    267   1.73      yamt 				lkp->lk_flags |= LK_WAITDRAIN;
    268   1.73      yamt 			else {
    269   1.73      yamt 				lkp->lk_waitcount++;
    270   1.73      yamt 				lkp->lk_flags |= LK_WAIT_NONZERO;
    271   1.73      yamt 			}
    272   1.73      yamt 			/* XXX Cast away volatile. */
    273  1.105        ad 			LOCKSTAT_START_TIMER(lsflag, slptime);
    274   1.73      yamt 			error = ltsleep(drain ?
    275   1.87  christos 			    (volatile const void *)&lkp->lk_flags :
    276   1.87  christos 			    (volatile const void *)lkp, lkp->lk_prio,
    277   1.73      yamt 			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
    278  1.105        ad 			LOCKSTAT_STOP_TIMER(lsflag, slptime);
    279  1.105        ad 			LOCKSTAT_EVENT_RA(lsflag, (void *)(uintptr_t)lkp,
    280  1.104        ad 			    LB_LOCKMGR | LB_SLEEP1, 1, slptime, ra);
    281   1.73      yamt 			if (!drain) {
    282   1.73      yamt 				lkp->lk_waitcount--;
    283   1.73      yamt 				if (lkp->lk_waitcount == 0)
    284   1.73      yamt 					lkp->lk_flags &= ~LK_WAIT_NONZERO;
    285   1.73      yamt 			}
    286   1.73      yamt 			if (error)
    287   1.73      yamt 				break;
    288   1.73      yamt 			if (extflags & LK_SLEEPFAIL) {
    289   1.73      yamt 				error = ENOLCK;
    290   1.73      yamt 				break;
    291   1.73      yamt 			}
    292   1.78   hannken 			if (lkp->lk_newlock != NULL) {
    293   1.78   hannken 				simple_lock(&lkp->lk_newlock->lk_interlock);
    294   1.78   hannken 				simple_unlock(&lkp->lk_interlock);
    295   1.78   hannken 				if (lkp->lk_waitcount == 0)
    296   1.87  christos 					wakeup(&lkp->lk_newlock);
    297   1.78   hannken 				*lkpp = lkp = lkp->lk_newlock;
    298   1.78   hannken 			}
    299   1.73      yamt 		}
    300  1.105        ad 
    301  1.105        ad 		LOCKSTAT_EXIT(lsflag);
    302    1.1      fvdl 	}
    303    1.1      fvdl 
    304   1.73      yamt 	return error;
    305   1.73      yamt }
    306   1.73      yamt 
    307   1.69   thorpej #define	SETHOLDER(lkp, pid, lid, cpu_id)				\
    308   1.19   thorpej do {									\
    309   1.19   thorpej 	if ((lkp)->lk_flags & LK_SPIN)					\
    310   1.19   thorpej 		(lkp)->lk_cpu = cpu_id;					\
    311   1.69   thorpej 	else {								\
    312   1.19   thorpej 		(lkp)->lk_lockholder = pid;				\
    313   1.69   thorpej 		(lkp)->lk_locklwp = lid;				\
    314   1.69   thorpej 	}								\
    315   1.30   thorpej } while (/*CONSTCOND*/0)
    316   1.19   thorpej 
    317   1.69   thorpej #define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
    318   1.19   thorpej 	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
    319   1.69   thorpej 	 ((lkp)->lk_cpu == (cpu_id)) :					\
    320   1.69   thorpej 	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
    321   1.19   thorpej 
    322   1.23   thorpej #define	WAKEUP_WAITER(lkp)						\
    323   1.23   thorpej do {									\
    324   1.73      yamt 	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==		\
    325   1.73      yamt 	    LK_WAIT_NONZERO) {						\
    326   1.87  christos 		wakeup((lkp));						\
    327   1.23   thorpej 	}								\
    328   1.30   thorpej } while (/*CONSTCOND*/0)
    329   1.23   thorpej 
    330   1.21   thorpej #if defined(LOCKDEBUG) /* { */
    331   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
    332   1.21   thorpej struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
    333   1.21   thorpej 
    334   1.27   thorpej #define	SPINLOCK_LIST_LOCK()						\
    335   1.29  sommerfe 	__cpu_simple_lock(&spinlock_list_slock.lock_data)
    336   1.21   thorpej 
    337   1.27   thorpej #define	SPINLOCK_LIST_UNLOCK()						\
    338   1.29  sommerfe 	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
    339   1.21   thorpej #else
    340   1.21   thorpej #define	SPINLOCK_LIST_LOCK()	/* nothing */
    341   1.21   thorpej 
    342   1.21   thorpej #define	SPINLOCK_LIST_UNLOCK()	/* nothing */
    343   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
    344   1.21   thorpej 
    345   1.91     perry _TAILQ_HEAD(, struct lock, volatile) spinlock_list =
    346   1.21   thorpej     TAILQ_HEAD_INITIALIZER(spinlock_list);
    347   1.21   thorpej 
    348   1.21   thorpej #define	HAVEIT(lkp)							\
    349   1.21   thorpej do {									\
    350   1.21   thorpej 	if ((lkp)->lk_flags & LK_SPIN) {				\
    351  1.114        ad 		int sp = splhigh();					\
    352   1.21   thorpej 		SPINLOCK_LIST_LOCK();					\
    353   1.87  christos 		TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list);	\
    354   1.21   thorpej 		SPINLOCK_LIST_UNLOCK();					\
    355   1.87  christos 		splx(sp);						\
    356   1.21   thorpej 	}								\
    357   1.30   thorpej } while (/*CONSTCOND*/0)
    358   1.21   thorpej 
    359   1.21   thorpej #define	DONTHAVEIT(lkp)							\
    360   1.21   thorpej do {									\
    361   1.21   thorpej 	if ((lkp)->lk_flags & LK_SPIN) {				\
    362  1.114        ad 		int sp = splhigh();					\
    363   1.21   thorpej 		SPINLOCK_LIST_LOCK();					\
    364   1.87  christos 		TAILQ_REMOVE(&spinlock_list, (lkp), lk_list);		\
    365   1.21   thorpej 		SPINLOCK_LIST_UNLOCK();					\
    366   1.87  christos 		splx(sp);						\
    367   1.21   thorpej 	}								\
    368   1.30   thorpej } while (/*CONSTCOND*/0)
    369   1.21   thorpej #else
    370   1.21   thorpej #define	HAVEIT(lkp)		/* nothing */
    371   1.21   thorpej 
    372   1.21   thorpej #define	DONTHAVEIT(lkp)		/* nothing */
    373   1.21   thorpej #endif /* LOCKDEBUG */ /* } */
    374   1.21   thorpej 
    375   1.25   thorpej #if defined(LOCKDEBUG)
    376   1.25   thorpej /*
    377   1.25   thorpej  * Lock debug printing routine; can be configured to print to console
    378   1.25   thorpej  * or log to syslog.
    379   1.25   thorpej  */
    380   1.25   thorpej void
    381   1.25   thorpej lock_printf(const char *fmt, ...)
    382   1.25   thorpej {
    383   1.68        pk 	char b[150];
    384   1.25   thorpej 	va_list ap;
    385   1.25   thorpej 
    386   1.25   thorpej 	va_start(ap, fmt);
    387   1.25   thorpej 	if (lock_debug_syslog)
    388   1.25   thorpej 		vlog(LOG_DEBUG, fmt, ap);
    389   1.68        pk 	else {
    390   1.68        pk 		vsnprintf(b, sizeof(b), fmt, ap);
    391   1.68        pk 		printf_nolog("%s", b);
    392   1.68        pk 	}
    393   1.25   thorpej 	va_end(ap);
    394   1.25   thorpej }
    395   1.25   thorpej #endif /* LOCKDEBUG */
    396   1.25   thorpej 
    397  1.110  christos static void
    398  1.110  christos lockpanic(volatile struct lock *lkp, const char *fmt, ...)
    399  1.110  christos {
    400  1.110  christos 	char s[150], b[150];
    401  1.110  christos #ifdef LOCKDEBUG
    402  1.110  christos 	static const char *locktype[] = {
    403  1.110  christos 	    "*0*", "shared", "exclusive", "upgrade", "exclupgrade",
    404  1.110  christos 	    "downgrade", "release", "drain", "exclother", "*9*",
    405  1.110  christos 	    "*10*", "*11*", "*12*", "*13*", "*14*", "*15*"
    406  1.110  christos 	};
    407  1.110  christos #endif
    408  1.110  christos 
    409  1.110  christos 	va_list ap;
    410  1.110  christos 	va_start(ap, fmt);
    411  1.110  christos 	vsnprintf(s, sizeof(s), fmt, ap);
    412  1.110  christos 	va_end(ap);
    413  1.110  christos 	bitmask_snprintf(lkp->lk_flags, __LK_FLAG_BITS, b, sizeof(b));
    414  1.110  christos 	panic("%s ("
    415  1.110  christos #ifdef LOCKDEBUG
    416  1.110  christos 	    "type %s "
    417  1.110  christos #endif
    418  1.110  christos 	    "flags %s, sharecount %d, exclusivecount %d, "
    419  1.110  christos 	    "recurselevel %d, waitcount %d, wmesg %s"
    420  1.110  christos #ifdef LOCKDEBUG
    421  1.110  christos 	    ", lock_file %s, unlock_file %s, lock_line %d, unlock_line %d"
    422  1.110  christos #endif
    423  1.110  christos 	    ")\n",
    424  1.110  christos 	    s,
    425  1.110  christos #ifdef LOCKDEBUG
    426  1.110  christos 	    locktype[lkp->lk_flags & LK_TYPE_MASK],
    427  1.110  christos #endif
    428  1.110  christos 	    b, lkp->lk_sharecount, lkp->lk_exclusivecount,
    429  1.110  christos 	    lkp->lk_recurselevel, lkp->lk_waitcount, lkp->lk_wmesg
    430  1.110  christos #ifdef LOCKDEBUG
    431  1.110  christos 	    , lkp->lk_lock_file, lkp->lk_unlock_file, lkp->lk_lock_line,
    432  1.110  christos 	    lkp->lk_unlock_line
    433  1.110  christos #endif
    434  1.110  christos 	);
    435  1.110  christos }
    436  1.110  christos 
    437    1.1      fvdl /*
    438   1.78   hannken  * Transfer any waiting processes from one lock to another.
    439   1.78   hannken  */
    440   1.78   hannken void
    441   1.78   hannken transferlockers(struct lock *from, struct lock *to)
    442   1.78   hannken {
    443   1.78   hannken 
    444   1.78   hannken 	KASSERT(from != to);
    445   1.78   hannken 	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
    446   1.78   hannken 	if (from->lk_waitcount == 0)
    447   1.78   hannken 		return;
    448   1.78   hannken 	from->lk_newlock = to;
    449   1.78   hannken 	wakeup((void *)from);
    450   1.78   hannken 	tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
    451   1.78   hannken 	from->lk_newlock = NULL;
    452   1.78   hannken 	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
    453   1.78   hannken 	KASSERT(from->lk_waitcount == 0);
    454   1.78   hannken }
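
/*
 * Illustrative sketch (editorial addition, hypothetical names): a caller
 * that replaces one lock object with another can hand any sleepers over
 * to the new lock before retiring the old one.
 */
#if 0
static void
example_replace_lock(struct lock *oldlkp, struct lock *newlkp)
{

	lockinit(newlkp, PINOD, "newlck", 0, 0);
	/* Move anyone sleeping on oldlkp over to newlkp. */
	transferlockers(oldlkp, newlkp);
}
#endif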
    455   1.78   hannken 
    456   1.78   hannken 
    457   1.78   hannken /*
    458    1.1      fvdl  * Initialize a lock; required before use.
    459    1.1      fvdl  */
    460    1.1      fvdl void
    461  1.109      yamt lockinit(struct lock *lkp, pri_t prio, const char *wmesg, int timo, int flags)
    462    1.1      fvdl {
    463    1.1      fvdl 
    464    1.8     perry 	memset(lkp, 0, sizeof(struct lock));
    465    1.1      fvdl 	simple_lock_init(&lkp->lk_interlock);
    466    1.1      fvdl 	lkp->lk_flags = flags & LK_EXTFLG_MASK;
    467   1.19   thorpej 	if (flags & LK_SPIN)
    468   1.19   thorpej 		lkp->lk_cpu = LK_NOCPU;
    469   1.19   thorpej 	else {
    470   1.19   thorpej 		lkp->lk_lockholder = LK_NOPROC;
    471   1.78   hannken 		lkp->lk_newlock = NULL;
    472   1.19   thorpej 		lkp->lk_prio = prio;
    473   1.19   thorpej 		lkp->lk_timo = timo;
    474   1.19   thorpej 	}
    475   1.19   thorpej 	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
    476   1.50   thorpej #if defined(LOCKDEBUG)
    477   1.50   thorpej 	lkp->lk_lock_file = NULL;
    478   1.50   thorpej 	lkp->lk_unlock_file = NULL;
    479   1.50   thorpej #endif
    480    1.1      fvdl }
    481    1.1      fvdl 
    482    1.1      fvdl /*
    483    1.1      fvdl  * Determine the status of a lock.
    484    1.1      fvdl  */
    485    1.1      fvdl int
    486   1.33   thorpej lockstatus(struct lock *lkp)
    487    1.1      fvdl {
    488   1.76      yamt 	int s = 0; /* XXX: gcc */
    489   1.76      yamt 	int lock_type = 0;
    490   1.76      yamt 	struct lwp *l = curlwp; /* XXX */
    491   1.76      yamt 	pid_t pid;
    492   1.76      yamt 	lwpid_t lid;
    493   1.88     blymn 	cpuid_t cpu_num;
    494   1.76      yamt 
    495   1.76      yamt 	if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
    496   1.88     blymn 		cpu_num = cpu_number();
    497   1.76      yamt 		pid = LK_KERNPROC;
    498   1.76      yamt 		lid = 0;
    499   1.76      yamt 	} else {
    500   1.88     blymn 		cpu_num = LK_NOCPU;
    501   1.76      yamt 		pid = l->l_proc->p_pid;
    502   1.76      yamt 		lid = l->l_lid;
    503   1.76      yamt 	}
    504    1.1      fvdl 
    505   1.43   thorpej 	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
    506   1.76      yamt 	if (lkp->lk_exclusivecount != 0) {
    507   1.88     blymn 		if (WEHOLDIT(lkp, pid, lid, cpu_num))
    508   1.76      yamt 			lock_type = LK_EXCLUSIVE;
    509   1.76      yamt 		else
    510   1.76      yamt 			lock_type = LK_EXCLOTHER;
    511   1.76      yamt 	} else if (lkp->lk_sharecount != 0)
    512    1.1      fvdl 		lock_type = LK_SHARED;
    513  1.103       chs 	else if (lkp->lk_flags & (LK_WANT_EXCL | LK_WANT_UPGRADE))
    514  1.103       chs 		lock_type = LK_EXCLOTHER;
    515   1.43   thorpej 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    516    1.1      fvdl 	return (lock_type);
    517    1.1      fvdl }
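
/*
 * Illustrative sketch (editorial addition): lockstatus() is typically
 * used in assertions to verify that the caller holds a lock in the
 * expected mode; "lkp" here is whatever lock the caller cares about.
 */
#if 0
static void
example_assert_exclusive(struct lock *lkp)
{

	KASSERT(lockstatus(lkp) == LK_EXCLUSIVE);
}
#endif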
    518   1.35   thorpej 
    519   1.92       chs #if defined(LOCKDEBUG)
    520   1.35   thorpej /*
    521   1.35   thorpej  * Make sure no spin locks are held by a CPU that is about
    522   1.35   thorpej  * to context switch.
    523   1.35   thorpej  */
    524   1.35   thorpej void
    525   1.35   thorpej spinlock_switchcheck(void)
    526   1.35   thorpej {
    527   1.35   thorpej 	u_long cnt;
    528   1.35   thorpej 	int s;
    529   1.35   thorpej 
    530  1.114        ad 	s = splhigh();
    531   1.35   thorpej #if defined(MULTIPROCESSOR)
    532   1.35   thorpej 	cnt = curcpu()->ci_spin_locks;
    533   1.35   thorpej #else
    534   1.35   thorpej 	cnt = spin_locks;
    535   1.35   thorpej #endif
    536   1.35   thorpej 	splx(s);
    537   1.35   thorpej 
    538   1.35   thorpej 	if (cnt != 0)
    539   1.35   thorpej 		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
    540   1.35   thorpej 		    (u_long) cpu_number(), cnt);
    541   1.35   thorpej }
    542   1.92       chs #endif /* LOCKDEBUG */
    543    1.1      fvdl 
    544    1.1      fvdl /*
    545   1.44   thorpej  * Locks and IPLs (interrupt priority levels):
    546   1.44   thorpej  *
    547   1.44   thorpej  * Locks which may be taken from interrupt context must be handled
    548   1.44   thorpej  * very carefully; you must spl to the highest IPL where the lock
    549   1.44   thorpej  * is needed before acquiring the lock.
    550   1.44   thorpej  *
    551   1.44   thorpej  * It is also important to avoid deadlock, since certain (very high
    552   1.44   thorpej  * priority) interrupts are often needed to keep the system as a whole
    553   1.44   thorpej  * from deadlocking, and must not be blocked while you are spinning
    554   1.44   thorpej  * waiting for a lower-priority lock.
    555   1.44   thorpej  *
    556   1.44   thorpej  * In addition, the lock-debugging hooks themselves need to use locks!
    557   1.44   thorpej  *
     558   1.44   thorpej  * A raw __cpu_simple_lock may be used from interrupts as long as it
    559   1.44   thorpej  * is acquired and held at a single IPL.
    560   1.44   thorpej  */
    561   1.44   thorpej 
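/*
 * Illustrative sketch (editorial addition, hypothetical lock and IPL):
 * a raw __cpu_simple_lock shared with an interrupt handler must always
 * be taken at (or above) the highest IPL at which it is used, here
 * assumed to be splvm().
 */
#if 0
static __cpu_simple_lock_t example_intr_lock;

static void
example_intr_lock_init(void)
{

	__cpu_simple_lock_init(&example_intr_lock);
}

static void
example_touch_intr_state(void)
{
	int s;

	s = splvm();	/* block the interrupt before spinning */
	__cpu_simple_lock(&example_intr_lock);
	/* ... manipulate data shared with the interrupt handler ... */
	__cpu_simple_unlock(&example_intr_lock);
	splx(s);
}
#endif
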
    562   1.44   thorpej /*
    563   1.32  sommerfe  * XXX XXX kludge around another kludge..
    564   1.32  sommerfe  *
    565   1.32  sommerfe  * vfs_shutdown() may be called from interrupt context, either as a result
    566   1.32  sommerfe  * of a panic, or from the debugger.   It proceeds to call
     567   1.32  sommerfe  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
    568   1.32  sommerfe  *
    569   1.32  sommerfe  * We would like to make an attempt to sync the filesystems in this case, so
    570   1.32  sommerfe  * if this happens, we treat attempts to acquire locks specially.
    571   1.32  sommerfe  * All locks are acquired on behalf of proc0.
    572   1.32  sommerfe  *
     573   1.32  sommerfe  * If we've already panicked, we don't block waiting for locks, but
    574   1.32  sommerfe  * just barge right ahead since we're already going down in flames.
    575   1.32  sommerfe  */
    576   1.32  sommerfe 
    577   1.32  sommerfe /*
    578    1.1      fvdl  * Set, change, or release a lock.
    579    1.1      fvdl  *
    580    1.1      fvdl  * Shared requests increment the shared count. Exclusive requests set the
    581    1.1      fvdl  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
    582    1.1      fvdl  * accepted shared locks and shared-to-exclusive upgrades to go away.
    583    1.1      fvdl  */
    584    1.1      fvdl int
    585   1.50   thorpej #if defined(LOCKDEBUG)
    586   1.91     perry _lockmgr(volatile struct lock *lkp, u_int flags,
    587   1.50   thorpej     struct simplelock *interlkp, const char *file, int line)
    588   1.50   thorpej #else
    589   1.91     perry lockmgr(volatile struct lock *lkp, u_int flags,
    590   1.33   thorpej     struct simplelock *interlkp)
    591   1.50   thorpej #endif
    592    1.1      fvdl {
    593    1.1      fvdl 	int error;
    594    1.1      fvdl 	pid_t pid;
    595   1.69   thorpej 	lwpid_t lid;
    596    1.1      fvdl 	int extflags;
    597   1.88     blymn 	cpuid_t cpu_num;
    598   1.69   thorpej 	struct lwp *l = curlwp;
    599   1.32  sommerfe 	int lock_shutdown_noblock = 0;
    600   1.67       scw 	int s = 0;
    601    1.1      fvdl 
    602    1.1      fvdl 	error = 0;
    603   1.19   thorpej 
    604   1.80      yamt 	/* LK_RETRY is for vn_lock, not for lockmgr. */
    605   1.79      yamt 	KASSERT((flags & LK_RETRY) == 0);
    606   1.79      yamt 
    607   1.43   thorpej 	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
    608    1.1      fvdl 	if (flags & LK_INTERLOCK)
    609    1.1      fvdl 		simple_unlock(interlkp);
    610    1.1      fvdl 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
    611   1.19   thorpej 
    612   1.21   thorpej #ifdef DIAGNOSTIC /* { */
    613   1.19   thorpej 	/*
    614   1.19   thorpej 	 * Don't allow spins on sleep locks and don't allow sleeps
    615   1.19   thorpej 	 * on spin locks.
    616   1.19   thorpej 	 */
    617   1.19   thorpej 	if ((flags ^ lkp->lk_flags) & LK_SPIN)
    618  1.110  christos 		lockpanic(lkp, "lockmgr: sleep/spin mismatch");
    619   1.21   thorpej #endif /* } */
    620   1.19   thorpej 
    621   1.69   thorpej 	if (extflags & LK_SPIN) {
    622   1.19   thorpej 		pid = LK_KERNPROC;
    623   1.69   thorpej 		lid = 0;
    624   1.69   thorpej 	} else {
    625   1.69   thorpej 		if (l == NULL) {
    626   1.32  sommerfe 			if (!doing_shutdown) {
    627   1.32  sommerfe 				panic("lockmgr: no context");
    628   1.32  sommerfe 			} else {
    629   1.69   thorpej 				l = &lwp0;
    630   1.32  sommerfe 				if (panicstr && (!(flags & LK_NOWAIT))) {
    631   1.32  sommerfe 					flags |= LK_NOWAIT;
    632   1.32  sommerfe 					lock_shutdown_noblock = 1;
    633   1.32  sommerfe 				}
    634   1.32  sommerfe 			}
    635   1.32  sommerfe 		}
    636   1.69   thorpej 		lid = l->l_lid;
    637   1.69   thorpej 		pid = l->l_proc->p_pid;
    638   1.19   thorpej 	}
    639   1.88     blymn 	cpu_num = cpu_number();
    640   1.19   thorpej 
    641    1.1      fvdl 	/*
    642    1.1      fvdl 	 * Once a lock has drained, the LK_DRAINING flag is set and an
    643    1.1      fvdl 	 * exclusive lock is returned. The only valid operation thereafter
    644    1.1      fvdl 	 * is a single release of that exclusive lock. This final release
    645    1.1      fvdl 	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
    646    1.1      fvdl 	 * further requests of any sort will result in a panic. The bits
    647    1.1      fvdl 	 * selected for these two flags are chosen so that they will be set
    648    1.1      fvdl 	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
    649    1.1      fvdl 	 * The final release is permitted to give a new lease on life to
    650    1.1      fvdl 	 * the lock by specifying LK_REENABLE.
    651    1.1      fvdl 	 */
    652    1.1      fvdl 	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
    653   1.28   thorpej #ifdef DIAGNOSTIC /* { */
    654    1.1      fvdl 		if (lkp->lk_flags & LK_DRAINED)
    655  1.110  christos 			lockpanic(lkp, "lockmgr: using decommissioned lock");
    656    1.1      fvdl 		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
    657   1.88     blymn 		    WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
    658  1.110  christos 			lockpanic(lkp, "lockmgr: non-release on draining lock: %d",
    659    1.1      fvdl 			    flags & LK_TYPE_MASK);
    660   1.28   thorpej #endif /* DIAGNOSTIC */ /* } */
    661    1.1      fvdl 		lkp->lk_flags &= ~LK_DRAINING;
    662    1.1      fvdl 		if ((flags & LK_REENABLE) == 0)
    663    1.1      fvdl 			lkp->lk_flags |= LK_DRAINED;
    664    1.1      fvdl 	}
    665    1.1      fvdl 
    666    1.1      fvdl 	switch (flags & LK_TYPE_MASK) {
    667    1.1      fvdl 
    668    1.1      fvdl 	case LK_SHARED:
    669   1.88     blymn 		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
    670    1.1      fvdl 			/*
    671    1.1      fvdl 			 * If just polling, check to see if we will block.
    672    1.1      fvdl 			 */
    673    1.1      fvdl 			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    674    1.1      fvdl 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
    675    1.1      fvdl 				error = EBUSY;
    676    1.1      fvdl 				break;
    677    1.1      fvdl 			}
    678    1.1      fvdl 			/*
    679    1.1      fvdl 			 * Wait for exclusive locks and upgrades to clear.
    680    1.1      fvdl 			 */
    681   1.78   hannken 			error = acquire(&lkp, &s, extflags, 0,
    682   1.98        ad 			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE,
    683   1.98        ad 			    RETURN_ADDRESS);
    684    1.1      fvdl 			if (error)
    685    1.1      fvdl 				break;
    686    1.1      fvdl 			lkp->lk_sharecount++;
    687   1.73      yamt 			lkp->lk_flags |= LK_SHARE_NONZERO;
    688   1.88     blymn 			COUNT(lkp, l, cpu_num, 1);
    689    1.1      fvdl 			break;
    690    1.1      fvdl 		}
    691    1.1      fvdl 		/*
    692    1.1      fvdl 		 * We hold an exclusive lock, so downgrade it to shared.
    693    1.1      fvdl 		 * An alternative would be to fail with EDEADLK.
    694    1.1      fvdl 		 */
    695    1.1      fvdl 		lkp->lk_sharecount++;
    696   1.73      yamt 		lkp->lk_flags |= LK_SHARE_NONZERO;
    697   1.88     blymn 		COUNT(lkp, l, cpu_num, 1);
    698    1.1      fvdl 		/* fall into downgrade */
    699    1.1      fvdl 
    700    1.1      fvdl 	case LK_DOWNGRADE:
    701   1.88     blymn 		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
    702   1.19   thorpej 		    lkp->lk_exclusivecount == 0)
    703  1.110  christos 			lockpanic(lkp, "lockmgr: not holding exclusive lock");
    704    1.1      fvdl 		lkp->lk_sharecount += lkp->lk_exclusivecount;
    705   1.73      yamt 		lkp->lk_flags |= LK_SHARE_NONZERO;
    706    1.1      fvdl 		lkp->lk_exclusivecount = 0;
    707   1.15      fvdl 		lkp->lk_recurselevel = 0;
    708    1.1      fvdl 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    709   1.69   thorpej 		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    710   1.50   thorpej #if defined(LOCKDEBUG)
    711   1.50   thorpej 		lkp->lk_unlock_file = file;
    712   1.50   thorpej 		lkp->lk_unlock_line = line;
    713   1.50   thorpej #endif
    714   1.21   thorpej 		DONTHAVEIT(lkp);
    715   1.23   thorpej 		WAKEUP_WAITER(lkp);
    716    1.1      fvdl 		break;
    717    1.1      fvdl 
    718    1.1      fvdl 	case LK_EXCLUPGRADE:
    719    1.1      fvdl 		/*
    720    1.1      fvdl 		 * If another process is ahead of us to get an upgrade,
    721    1.1      fvdl 		 * then we want to fail rather than have an intervening
    722    1.1      fvdl 		 * exclusive access.
    723    1.1      fvdl 		 */
    724    1.1      fvdl 		if (lkp->lk_flags & LK_WANT_UPGRADE) {
    725    1.1      fvdl 			lkp->lk_sharecount--;
    726   1.73      yamt 			if (lkp->lk_sharecount == 0)
    727   1.73      yamt 				lkp->lk_flags &= ~LK_SHARE_NONZERO;
    728   1.88     blymn 			COUNT(lkp, l, cpu_num, -1);
    729    1.1      fvdl 			error = EBUSY;
    730    1.1      fvdl 			break;
    731    1.1      fvdl 		}
    732    1.1      fvdl 		/* fall into normal upgrade */
    733    1.1      fvdl 
    734    1.1      fvdl 	case LK_UPGRADE:
    735    1.1      fvdl 		/*
    736    1.1      fvdl 		 * Upgrade a shared lock to an exclusive one. If another
    737    1.1      fvdl 		 * shared lock has already requested an upgrade to an
    738    1.1      fvdl 		 * exclusive lock, our shared lock is released and an
    739    1.1      fvdl 		 * exclusive lock is requested (which will be granted
    740    1.1      fvdl 		 * after the upgrade). If we return an error, the file
    741    1.1      fvdl 		 * will always be unlocked.
    742    1.1      fvdl 		 */
    743   1.88     blymn 		if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
    744  1.110  christos 			lockpanic(lkp, "lockmgr: upgrade exclusive lock");
    745    1.1      fvdl 		lkp->lk_sharecount--;
    746   1.73      yamt 		if (lkp->lk_sharecount == 0)
    747   1.73      yamt 			lkp->lk_flags &= ~LK_SHARE_NONZERO;
    748   1.88     blymn 		COUNT(lkp, l, cpu_num, -1);
    749    1.1      fvdl 		/*
    750    1.1      fvdl 		 * If we are just polling, check to see if we will block.
    751    1.1      fvdl 		 */
    752    1.1      fvdl 		if ((extflags & LK_NOWAIT) &&
    753    1.1      fvdl 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
    754    1.1      fvdl 		     lkp->lk_sharecount > 1)) {
    755    1.1      fvdl 			error = EBUSY;
    756    1.1      fvdl 			break;
    757    1.1      fvdl 		}
    758    1.1      fvdl 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
    759    1.1      fvdl 			/*
    760    1.1      fvdl 			 * We are first shared lock to request an upgrade, so
    761    1.1      fvdl 			 * request upgrade and wait for the shared count to
    762    1.1      fvdl 			 * drop to zero, then take exclusive lock.
    763    1.1      fvdl 			 */
    764    1.1      fvdl 			lkp->lk_flags |= LK_WANT_UPGRADE;
    765   1.98        ad 			error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO,
    766   1.98        ad 			    RETURN_ADDRESS);
    767    1.1      fvdl 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
    768   1.83      yamt 			if (error) {
    769   1.83      yamt 				WAKEUP_WAITER(lkp);
    770    1.1      fvdl 				break;
    771   1.83      yamt 			}
    772    1.1      fvdl 			lkp->lk_flags |= LK_HAVE_EXCL;
    773   1.88     blymn 			SETHOLDER(lkp, pid, lid, cpu_num);
    774   1.50   thorpej #if defined(LOCKDEBUG)
    775   1.50   thorpej 			lkp->lk_lock_file = file;
    776   1.50   thorpej 			lkp->lk_lock_line = line;
    777   1.50   thorpej #endif
    778   1.21   thorpej 			HAVEIT(lkp);
    779    1.1      fvdl 			if (lkp->lk_exclusivecount != 0)
    780  1.110  christos 				lockpanic(lkp, "lockmgr: non-zero exclusive count");
    781    1.1      fvdl 			lkp->lk_exclusivecount = 1;
    782   1.15      fvdl 			if (extflags & LK_SETRECURSE)
    783   1.15      fvdl 				lkp->lk_recurselevel = 1;
    784   1.88     blymn 			COUNT(lkp, l, cpu_num, 1);
    785    1.1      fvdl 			break;
    786    1.1      fvdl 		}
    787    1.1      fvdl 		/*
    788    1.1      fvdl 		 * Someone else has requested upgrade. Release our shared
    789    1.1      fvdl 		 * lock, awaken upgrade requestor if we are the last shared
    790    1.1      fvdl 		 * lock, then request an exclusive lock.
    791    1.1      fvdl 		 */
    792   1.23   thorpej 		if (lkp->lk_sharecount == 0)
    793   1.23   thorpej 			WAKEUP_WAITER(lkp);
    794    1.1      fvdl 		/* fall into exclusive request */
    795    1.1      fvdl 
    796    1.1      fvdl 	case LK_EXCLUSIVE:
    797   1.88     blymn 		if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
    798    1.1      fvdl 			/*
    799   1.19   thorpej 			 * Recursive lock.
    800    1.1      fvdl 			 */
    801   1.15      fvdl 			if ((extflags & LK_CANRECURSE) == 0 &&
    802   1.16  sommerfe 			     lkp->lk_recurselevel == 0) {
    803   1.16  sommerfe 				if (extflags & LK_RECURSEFAIL) {
    804   1.16  sommerfe 					error = EDEADLK;
    805   1.16  sommerfe 					break;
    806   1.16  sommerfe 				} else
    807  1.110  christos 					lockpanic(lkp, "lockmgr: locking against myself");
    808   1.16  sommerfe 			}
    809    1.1      fvdl 			lkp->lk_exclusivecount++;
    810   1.15      fvdl 			if (extflags & LK_SETRECURSE &&
    811   1.15      fvdl 			    lkp->lk_recurselevel == 0)
    812   1.15      fvdl 				lkp->lk_recurselevel = lkp->lk_exclusivecount;
    813   1.88     blymn 			COUNT(lkp, l, cpu_num, 1);
    814    1.1      fvdl 			break;
    815    1.1      fvdl 		}
    816    1.1      fvdl 		/*
    817    1.1      fvdl 		 * If we are just polling, check to see if we will sleep.
    818    1.1      fvdl 		 */
    819   1.73      yamt 		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    820   1.73      yamt 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    821   1.73      yamt 		     LK_SHARE_NONZERO))) {
    822    1.1      fvdl 			error = EBUSY;
    823    1.1      fvdl 			break;
    824    1.1      fvdl 		}
    825    1.1      fvdl 		/*
    826    1.1      fvdl 		 * Try to acquire the want_exclusive flag.
    827    1.1      fvdl 		 */
    828   1.82      yamt 		error = acquire(&lkp, &s, extflags, 0,
    829   1.98        ad 		    LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
    830    1.1      fvdl 		if (error)
    831    1.1      fvdl 			break;
    832    1.1      fvdl 		lkp->lk_flags |= LK_WANT_EXCL;
    833    1.1      fvdl 		/*
    834    1.1      fvdl 		 * Wait for shared locks and upgrades to finish.
    835    1.1      fvdl 		 */
    836   1.78   hannken 		error = acquire(&lkp, &s, extflags, 0,
    837   1.98        ad 		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO,
    838   1.98        ad 		    RETURN_ADDRESS);
    839    1.1      fvdl 		lkp->lk_flags &= ~LK_WANT_EXCL;
    840   1.83      yamt 		if (error) {
    841   1.83      yamt 			WAKEUP_WAITER(lkp);
    842    1.1      fvdl 			break;
    843   1.83      yamt 		}
    844    1.1      fvdl 		lkp->lk_flags |= LK_HAVE_EXCL;
    845   1.88     blymn 		SETHOLDER(lkp, pid, lid, cpu_num);
    846   1.50   thorpej #if defined(LOCKDEBUG)
    847   1.50   thorpej 		lkp->lk_lock_file = file;
    848   1.50   thorpej 		lkp->lk_lock_line = line;
    849   1.50   thorpej #endif
    850   1.21   thorpej 		HAVEIT(lkp);
    851    1.1      fvdl 		if (lkp->lk_exclusivecount != 0)
    852  1.110  christos 			lockpanic(lkp, "lockmgr: non-zero exclusive count");
    853    1.1      fvdl 		lkp->lk_exclusivecount = 1;
    854   1.15      fvdl 		if (extflags & LK_SETRECURSE)
    855   1.15      fvdl 			lkp->lk_recurselevel = 1;
    856   1.88     blymn 		COUNT(lkp, l, cpu_num, 1);
    857    1.1      fvdl 		break;
    858    1.1      fvdl 
    859    1.1      fvdl 	case LK_RELEASE:
    860    1.1      fvdl 		if (lkp->lk_exclusivecount != 0) {
    861   1.88     blymn 			if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
    862   1.19   thorpej 				if (lkp->lk_flags & LK_SPIN) {
    863  1.110  christos 					lockpanic(lkp,
    864  1.110  christos 					    "lockmgr: processor %lu, not "
    865   1.19   thorpej 					    "exclusive lock holder %lu "
    866   1.88     blymn 					    "unlocking", cpu_num, lkp->lk_cpu);
    867   1.19   thorpej 				} else {
    868  1.112  perseant 					lockpanic(lkp, "lockmgr: pid %d.%d, not "
    869  1.112  perseant 					    "exclusive lock holder %d.%d "
    870  1.112  perseant 					    "unlocking", pid, lid,
    871  1.112  perseant 					    lkp->lk_lockholder,
    872  1.112  perseant 					    lkp->lk_locklwp);
    873   1.19   thorpej 				}
    874   1.19   thorpej 			}
    875   1.15      fvdl 			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
    876   1.15      fvdl 				lkp->lk_recurselevel = 0;
    877    1.1      fvdl 			lkp->lk_exclusivecount--;
    878   1.88     blymn 			COUNT(lkp, l, cpu_num, -1);
    879    1.1      fvdl 			if (lkp->lk_exclusivecount == 0) {
    880    1.1      fvdl 				lkp->lk_flags &= ~LK_HAVE_EXCL;
    881   1.69   thorpej 				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    882   1.50   thorpej #if defined(LOCKDEBUG)
    883   1.50   thorpej 				lkp->lk_unlock_file = file;
    884   1.50   thorpej 				lkp->lk_unlock_line = line;
    885   1.50   thorpej #endif
    886   1.21   thorpej 				DONTHAVEIT(lkp);
    887    1.1      fvdl 			}
    888    1.1      fvdl 		} else if (lkp->lk_sharecount != 0) {
    889    1.1      fvdl 			lkp->lk_sharecount--;
    890   1.73      yamt 			if (lkp->lk_sharecount == 0)
    891   1.73      yamt 				lkp->lk_flags &= ~LK_SHARE_NONZERO;
    892   1.88     blymn 			COUNT(lkp, l, cpu_num, -1);
    893    1.1      fvdl 		}
    894   1.39   thorpej #ifdef DIAGNOSTIC
    895   1.39   thorpej 		else
    896  1.110  christos 			lockpanic(lkp, "lockmgr: release of unlocked lock!");
    897   1.39   thorpej #endif
    898   1.23   thorpej 		WAKEUP_WAITER(lkp);
    899    1.1      fvdl 		break;
    900    1.1      fvdl 
    901    1.1      fvdl 	case LK_DRAIN:
    902    1.1      fvdl 		/*
    903   1.86     perry 		 * Check that we do not already hold the lock, as it can
    904    1.1      fvdl 		 * never drain if we do. Unfortunately, we have no way to
    905    1.1      fvdl 		 * check for holding a shared lock, but at least we can
    906    1.1      fvdl 		 * check for an exclusive one.
    907    1.1      fvdl 		 */
    908   1.88     blymn 		if (WEHOLDIT(lkp, pid, lid, cpu_num))
    909  1.110  christos 			lockpanic(lkp, "lockmgr: draining against myself");
    910    1.1      fvdl 		/*
    911    1.1      fvdl 		 * If we are just polling, check to see if we will sleep.
    912    1.1      fvdl 		 */
    913   1.73      yamt 		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    914   1.73      yamt 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    915   1.73      yamt 		     LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
    916    1.1      fvdl 			error = EBUSY;
    917    1.1      fvdl 			break;
    918    1.1      fvdl 		}
    919   1.78   hannken 		error = acquire(&lkp, &s, extflags, 1,
    920   1.73      yamt 		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    921   1.98        ad 		    LK_SHARE_NONZERO | LK_WAIT_NONZERO,
    922   1.98        ad 		    RETURN_ADDRESS);
    923   1.23   thorpej 		if (error)
    924   1.23   thorpej 			break;
    925    1.1      fvdl 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
    926   1.88     blymn 		SETHOLDER(lkp, pid, lid, cpu_num);
    927   1.50   thorpej #if defined(LOCKDEBUG)
    928   1.50   thorpej 		lkp->lk_lock_file = file;
    929   1.50   thorpej 		lkp->lk_lock_line = line;
    930   1.50   thorpej #endif
    931   1.21   thorpej 		HAVEIT(lkp);
    932    1.1      fvdl 		lkp->lk_exclusivecount = 1;
    933   1.15      fvdl 		/* XXX unlikely that we'd want this */
    934   1.15      fvdl 		if (extflags & LK_SETRECURSE)
    935   1.15      fvdl 			lkp->lk_recurselevel = 1;
    936   1.88     blymn 		COUNT(lkp, l, cpu_num, 1);
    937    1.1      fvdl 		break;
    938    1.1      fvdl 
    939    1.1      fvdl 	default:
    940   1.43   thorpej 		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    941  1.110  christos 		lockpanic(lkp, "lockmgr: unknown locktype request %d",
    942    1.1      fvdl 		    flags & LK_TYPE_MASK);
    943    1.1      fvdl 		/* NOTREACHED */
    944    1.1      fvdl 	}
    945   1.23   thorpej 	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
    946   1.23   thorpej 	    ((lkp->lk_flags &
    947   1.73      yamt 	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    948   1.73      yamt 	      LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
    949    1.1      fvdl 		lkp->lk_flags &= ~LK_WAITDRAIN;
    950   1.87  christos 		wakeup(&lkp->lk_flags);
    951    1.1      fvdl 	}
    952   1.32  sommerfe 	/*
    953   1.32  sommerfe 	 * Note that this panic will be a recursive panic, since
    954   1.32  sommerfe 	 * we only set lock_shutdown_noblock above if panicstr != NULL.
    955   1.32  sommerfe 	 */
    956   1.32  sommerfe 	if (error && lock_shutdown_noblock)
    957  1.110  christos 		lockpanic(lkp, "lockmgr: deadlock (see previous panic)");
    958   1.86     perry 
    959   1.43   thorpej 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    960    1.1      fvdl 	return (error);
    961    1.1      fvdl }
    962    1.1      fvdl 
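/*
 * Illustrative sketch (editorial addition, hypothetical caller): taking
 * a shared hold, upgrading it to exclusive and downgrading it again.
 * Note that if LK_UPGRADE fails, the shared hold has already been
 * dropped, so the caller owns nothing on the error path.
 */
#if 0
static int
example_upgrade_downgrade(struct lock *lkp)
{
	int error;

	(void) lockmgr(lkp, LK_SHARED, NULL);
	error = lockmgr(lkp, LK_UPGRADE, NULL);
	if (error)
		return error;			/* shared hold is gone too */
	/* ... work requiring exclusive access ... */
	(void) lockmgr(lkp, LK_DOWNGRADE, NULL);
	(void) lockmgr(lkp, LK_RELEASE, NULL);
	return 0;
}
#endif
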
    963    1.1      fvdl /*
    964   1.47  sommerfe  * For a recursive spinlock held one or more times by the current CPU,
    965   1.47  sommerfe  * release all N locks, and return N.
    966   1.47  sommerfe  * Intended for use in mi_switch() shortly before context switching.
    967   1.47  sommerfe  */
    968   1.47  sommerfe 
    969   1.47  sommerfe int
    970   1.50   thorpej #if defined(LOCKDEBUG)
    971   1.91     perry _spinlock_release_all(volatile struct lock *lkp, const char *file, int line)
    972   1.50   thorpej #else
    973   1.91     perry spinlock_release_all(volatile struct lock *lkp)
    974   1.50   thorpej #endif
    975   1.47  sommerfe {
    976   1.47  sommerfe 	int s, count;
    977   1.88     blymn 	cpuid_t cpu_num;
    978   1.86     perry 
    979   1.47  sommerfe 	KASSERT(lkp->lk_flags & LK_SPIN);
    980   1.86     perry 
    981   1.47  sommerfe 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
    982   1.47  sommerfe 
    983   1.88     blymn 	cpu_num = cpu_number();
    984   1.47  sommerfe 	count = lkp->lk_exclusivecount;
    985   1.86     perry 
    986   1.47  sommerfe 	if (count != 0) {
    987   1.86     perry #ifdef DIAGNOSTIC
    988   1.88     blymn 		if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
    989  1.110  christos 			lockpanic(lkp, "spinlock_release_all: processor %lu, not "
    990   1.47  sommerfe 			    "exclusive lock holder %lu "
    991   1.88     blymn 			    "unlocking", (long)cpu_num, lkp->lk_cpu);
    992   1.47  sommerfe 		}
    993   1.47  sommerfe #endif
    994   1.47  sommerfe 		lkp->lk_recurselevel = 0;
    995   1.47  sommerfe 		lkp->lk_exclusivecount = 0;
    996   1.88     blymn 		COUNT_CPU(cpu_num, -count);
    997   1.47  sommerfe 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    998   1.69   thorpej 		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    999   1.50   thorpej #if defined(LOCKDEBUG)
   1000   1.50   thorpej 		lkp->lk_unlock_file = file;
   1001   1.50   thorpej 		lkp->lk_unlock_line = line;
   1002   1.50   thorpej #endif
   1003   1.47  sommerfe 		DONTHAVEIT(lkp);
   1004   1.47  sommerfe 	}
   1005   1.47  sommerfe #ifdef DIAGNOSTIC
   1006   1.47  sommerfe 	else if (lkp->lk_sharecount != 0)
   1007  1.110  christos 		lockpanic(lkp, "spinlock_release_all: release of shared lock!");
   1008   1.47  sommerfe 	else
   1009  1.110  christos 		lockpanic(lkp, "spinlock_release_all: release of unlocked lock!");
   1010   1.47  sommerfe #endif
   1011   1.86     perry 	INTERLOCK_RELEASE(lkp, LK_SPIN, s);
   1012   1.47  sommerfe 
   1013   1.47  sommerfe 	return (count);
   1014   1.47  sommerfe }
   1015   1.47  sommerfe 
   1016   1.47  sommerfe /*
    1017   1.47  sommerfe  * For a recursive spinlock previously released with spinlock_release_all(),
    1018   1.47  sommerfe  * re-acquire it the given number of times on behalf of the current CPU.
   1019   1.47  sommerfe  * Intended for use in mi_switch() right after resuming execution.
   1020   1.47  sommerfe  */
   1021   1.47  sommerfe 
   1022   1.47  sommerfe void
   1023   1.50   thorpej #if defined(LOCKDEBUG)
   1024   1.91     perry _spinlock_acquire_count(volatile struct lock *lkp, int count,
   1025   1.50   thorpej     const char *file, int line)
   1026   1.50   thorpej #else
   1027   1.91     perry spinlock_acquire_count(volatile struct lock *lkp, int count)
   1028   1.50   thorpej #endif
   1029   1.47  sommerfe {
   1030   1.47  sommerfe 	int s, error;
   1031   1.88     blymn 	cpuid_t cpu_num;
   1032   1.86     perry 
   1033   1.47  sommerfe 	KASSERT(lkp->lk_flags & LK_SPIN);
   1034   1.86     perry 
   1035   1.47  sommerfe 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
   1036   1.47  sommerfe 
   1037   1.88     blymn 	cpu_num = cpu_number();
   1038   1.47  sommerfe 
   1039   1.47  sommerfe #ifdef DIAGNOSTIC
   1040   1.88     blymn 	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
   1041  1.110  christos 		lockpanic(lkp, "spinlock_acquire_count: processor %lu already holds lock", (long)cpu_num);
   1042   1.47  sommerfe #endif
   1043   1.47  sommerfe 	/*
   1044   1.47  sommerfe 	 * Try to acquire the want_exclusive flag.
   1045   1.47  sommerfe 	 */
   1046   1.98        ad 	error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL,
   1047   1.98        ad 	    RETURN_ADDRESS);
   1048   1.47  sommerfe 	lkp->lk_flags |= LK_WANT_EXCL;
   1049   1.47  sommerfe 	/*
   1050   1.47  sommerfe 	 * Wait for shared locks and upgrades to finish.
   1051   1.47  sommerfe 	 */
   1052   1.78   hannken 	error = acquire(&lkp, &s, LK_SPIN, 0,
   1053   1.98        ad 	    LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE,
   1054   1.98        ad 	    RETURN_ADDRESS);
   1055   1.47  sommerfe 	lkp->lk_flags &= ~LK_WANT_EXCL;
   1056   1.47  sommerfe 	lkp->lk_flags |= LK_HAVE_EXCL;
   1057   1.88     blymn 	SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
   1058   1.50   thorpej #if defined(LOCKDEBUG)
   1059   1.50   thorpej 	lkp->lk_lock_file = file;
   1060   1.50   thorpej 	lkp->lk_lock_line = line;
   1061   1.50   thorpej #endif
   1062   1.47  sommerfe 	HAVEIT(lkp);
   1063   1.47  sommerfe 	if (lkp->lk_exclusivecount != 0)
   1064  1.110  christos 		lockpanic(lkp, "lockmgr: non-zero exclusive count");
   1065   1.47  sommerfe 	lkp->lk_exclusivecount = count;
   1066   1.47  sommerfe 	lkp->lk_recurselevel = 1;
   1067   1.88     blymn 	COUNT_CPU(cpu_num, count);
   1068   1.47  sommerfe 
   1069   1.86     perry 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
   1070   1.47  sommerfe }
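
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c): how
 * spinlock_release_all() and spinlock_acquire_count() are expected to
 * pair up across a context switch.  The helper function name below is
 * an assumption made for the example only.
 */
#if 0	/* example only */
static void
example_switch_with_spinlock(volatile struct lock *lkp)
{
	int count;

	/* Drop every hold the current CPU has, remembering how many. */
	count = spinlock_release_all(lkp);

	/* ... mi_switch() runs here; we later resume on this CPU ... */

	/* Restore the saved recursion count on the same spinlock. */
	spinlock_acquire_count(lkp, count);
}
#endif	/* example only */
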
   1071   1.47  sommerfe 
   1072   1.47  sommerfe 
   1073   1.47  sommerfe 
   1074   1.47  sommerfe /*
    1075    1.1      fvdl  * Print out information about the state of a lock. Used by VOP_PRINT
    1076    1.1      fvdl  * routines to display the status of contained locks.
   1077    1.1      fvdl  */
   1078    1.2      fvdl void
   1079   1.91     perry lockmgr_printinfo(volatile struct lock *lkp)
   1080    1.1      fvdl {
   1081    1.1      fvdl 
   1082    1.1      fvdl 	if (lkp->lk_sharecount)
   1083    1.1      fvdl 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
   1084    1.1      fvdl 		    lkp->lk_sharecount);
   1085   1.19   thorpej 	else if (lkp->lk_flags & LK_HAVE_EXCL) {
   1086   1.19   thorpej 		printf(" lock type %s: EXCL (count %d) by ",
   1087   1.19   thorpej 		    lkp->lk_wmesg, lkp->lk_exclusivecount);
   1088   1.19   thorpej 		if (lkp->lk_flags & LK_SPIN)
   1089   1.19   thorpej 			printf("processor %lu", lkp->lk_cpu);
   1090   1.19   thorpej 		else
   1091   1.69   thorpej 			printf("pid %d.%d", lkp->lk_lockholder,
   1092   1.69   thorpej 			    lkp->lk_locklwp);
   1093   1.19   thorpej 	} else
   1094   1.19   thorpej 		printf(" not locked");
   1095   1.19   thorpej 	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
   1096    1.1      fvdl 		printf(" with %d pending", lkp->lk_waitcount);
   1097    1.1      fvdl }
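
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c): a
 * VOP_PRINT-style routine using lockmgr_printinfo() to report on a
 * contained lock.  The "examplenode" structure is an assumption made
 * for the example only.
 */
#if 0	/* example only */
struct examplenode {
	struct lock n_lock;
};

static void
example_vop_print(struct examplenode *np)
{

	printf("tag EXAMPLE");
	lockmgr_printinfo(&np->n_lock);	/* SHARED/EXCL holder and waiters */
	printf("\n");
}
#endif	/* example only */
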
   1098    1.1      fvdl 
   1099   1.21   thorpej #if defined(LOCKDEBUG) /* { */
   1100   1.91     perry _TAILQ_HEAD(, struct simplelock, volatile) simplelock_list =
   1101   1.21   thorpej     TAILQ_HEAD_INITIALIZER(simplelock_list);
   1102   1.21   thorpej 
   1103   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1104   1.21   thorpej struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
   1105   1.21   thorpej 
   1106   1.21   thorpej #define	SLOCK_LIST_LOCK()						\
   1107   1.29  sommerfe 	__cpu_simple_lock(&simplelock_list_slock.lock_data)
   1108   1.21   thorpej 
   1109   1.21   thorpej #define	SLOCK_LIST_UNLOCK()						\
   1110   1.29  sommerfe 	__cpu_simple_unlock(&simplelock_list_slock.lock_data)
   1111   1.21   thorpej 
   1112   1.21   thorpej #define	SLOCK_COUNT(x)							\
   1113   1.47  sommerfe 	curcpu()->ci_simple_locks += (x)
   1114   1.21   thorpej #else
   1115   1.21   thorpej u_long simple_locks;
   1116   1.21   thorpej 
   1117   1.21   thorpej #define	SLOCK_LIST_LOCK()	/* nothing */
   1118   1.21   thorpej 
   1119   1.21   thorpej #define	SLOCK_LIST_UNLOCK()	/* nothing */
   1120   1.21   thorpej 
   1121   1.21   thorpej #define	SLOCK_COUNT(x)		simple_locks += (x)
   1122   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
   1123   1.21   thorpej 
   1124   1.26  sommerfe #ifdef MULTIPROCESSOR
   1125   1.75       wiz #define SLOCK_MP()		lock_printf("on CPU %ld\n", 		\
   1126   1.46   thorpej 				    (u_long) cpu_number())
   1127   1.26  sommerfe #else
   1128   1.26  sommerfe #define SLOCK_MP()		/* nothing */
   1129   1.26  sommerfe #endif
   1130   1.26  sommerfe 
   1131   1.21   thorpej #define	SLOCK_WHERE(str, alp, id, l)					\
   1132   1.21   thorpej do {									\
   1133   1.58       chs 	lock_printf("\n");						\
   1134   1.25   thorpej 	lock_printf(str);						\
   1135   1.33   thorpej 	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
   1136   1.26  sommerfe 	SLOCK_MP();							\
   1137   1.21   thorpej 	if ((alp)->lock_file != NULL)					\
   1138   1.25   thorpej 		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
   1139   1.21   thorpej 		    (alp)->lock_line);					\
   1140   1.21   thorpej 	if ((alp)->unlock_file != NULL)					\
   1141   1.25   thorpej 		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
   1142   1.21   thorpej 		    (alp)->unlock_line);				\
   1143   1.58       chs 	SLOCK_TRACE()							\
   1144   1.21   thorpej 	SLOCK_DEBUGGER();						\
   1145   1.30   thorpej } while (/*CONSTCOND*/0)
   1146   1.12       chs 
   1147    1.1      fvdl /*
   1148    1.1      fvdl  * Simple lock functions so that the debugger can see from whence
   1149    1.1      fvdl  * they are being called.
   1150    1.1      fvdl  */
   1151    1.1      fvdl void
   1152   1.91     perry simple_lock_init(volatile struct simplelock *alp)
   1153    1.1      fvdl {
   1154   1.21   thorpej 
   1155   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1156   1.27   thorpej 	__cpu_simple_lock_init(&alp->lock_data);
   1157   1.21   thorpej #else
   1158   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1159   1.21   thorpej #endif /* } */
   1160    1.5       chs 	alp->lock_file = NULL;
   1161    1.5       chs 	alp->lock_line = 0;
   1162    1.5       chs 	alp->unlock_file = NULL;
   1163    1.5       chs 	alp->unlock_line = 0;
   1164   1.41   thorpej 	alp->lock_holder = LK_NOCPU;
   1165    1.1      fvdl }
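
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c):
 * initializing and using a simplelock.  Under LOCKDEBUG the
 * simple_lock()/simple_unlock() macros in <sys/lock.h> expand to the
 * _simple_lock()/_simple_unlock() functions in this file with
 * __FILE__/__LINE__ filled in.  The softc structure is an assumption
 * made for the example only.
 */
#if 0	/* example only */
struct example_softc {
	struct simplelock sc_slock;
	int sc_count;
};

static void
example_init_and_bump(struct example_softc *sc)
{

	simple_lock_init(&sc->sc_slock);

	simple_lock(&sc->sc_slock);
	sc->sc_count++;
	simple_unlock(&sc->sc_slock);
}
#endif	/* example only */
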
   1166    1.1      fvdl 
   1167    1.1      fvdl void
   1168   1.91     perry _simple_lock(volatile struct simplelock *alp, const char *id, int l)
   1169    1.1      fvdl {
   1170   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1171   1.12       chs 	int s;
   1172   1.12       chs 
   1173  1.114        ad 	s = splhigh();
   1174   1.21   thorpej 
   1175   1.21   thorpej 	/*
   1176   1.21   thorpej 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1177   1.21   thorpej 	 * don't take any action, and just fall into the normal spin case.
   1178   1.21   thorpej 	 */
   1179   1.27   thorpej 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1180   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1181   1.88     blymn 		if (alp->lock_holder == cpu_num) {
   1182   1.21   thorpej 			SLOCK_WHERE("simple_lock: locking against myself\n",
   1183   1.21   thorpej 			    alp, id, l);
   1184   1.21   thorpej 			goto out;
   1185    1.1      fvdl 		}
   1186   1.21   thorpej #else
   1187   1.21   thorpej 		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
   1188   1.21   thorpej 		goto out;
   1189   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
   1190    1.1      fvdl 	}
   1191   1.21   thorpej 
   1192   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1193   1.21   thorpej 	/* Acquire the lock before modifying any fields. */
   1194   1.70        pk 	splx(s);
   1195   1.27   thorpej 	__cpu_simple_lock(&alp->lock_data);
   1196  1.114        ad 	s = splhigh();
   1197   1.21   thorpej #else
   1198   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1199   1.21   thorpej #endif /* } */
   1200   1.21   thorpej 
   1201   1.45  sommerfe 	if (alp->lock_holder != LK_NOCPU) {
   1202   1.45  sommerfe 		SLOCK_WHERE("simple_lock: uninitialized lock\n",
   1203   1.45  sommerfe 		    alp, id, l);
   1204   1.45  sommerfe 	}
   1205    1.5       chs 	alp->lock_file = id;
   1206    1.5       chs 	alp->lock_line = l;
   1207   1.88     blymn 	alp->lock_holder = cpu_num;
   1208   1.21   thorpej 
   1209   1.21   thorpej 	SLOCK_LIST_LOCK();
   1210   1.87  christos 	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
   1211   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1212   1.21   thorpej 
   1213   1.21   thorpej 	SLOCK_COUNT(1);
   1214   1.21   thorpej 
   1215   1.21   thorpej  out:
   1216   1.18       chs 	splx(s);
   1217   1.38   thorpej }
   1218   1.38   thorpej 
   1219   1.38   thorpej int
   1220   1.91     perry _simple_lock_held(volatile struct simplelock *alp)
   1221   1.38   thorpej {
   1222   1.54     enami #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
   1223   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1224   1.54     enami #endif
   1225   1.38   thorpej 	int s, locked = 0;
   1226   1.38   thorpej 
   1227  1.114        ad 	s = splhigh();
   1228   1.42   thorpej 
   1229   1.42   thorpej #if defined(MULTIPROCESSOR)
   1230   1.38   thorpej 	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
   1231   1.88     blymn 		locked = (alp->lock_holder == cpu_num);
   1232   1.38   thorpej 	else
   1233   1.38   thorpej 		__cpu_simple_unlock(&alp->lock_data);
   1234   1.38   thorpej #else
   1235   1.42   thorpej 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1236   1.42   thorpej 		locked = 1;
   1237   1.88     blymn 		KASSERT(alp->lock_holder == cpu_num);
   1238   1.42   thorpej 	}
   1239   1.42   thorpej #endif
   1240   1.38   thorpej 
   1241   1.38   thorpej 	splx(s);
   1242   1.42   thorpej 
   1243   1.38   thorpej 	return (locked);
   1244    1.1      fvdl }
   1245    1.1      fvdl 
   1246    1.1      fvdl int
   1247   1.91     perry _simple_lock_try(volatile struct simplelock *alp, const char *id, int l)
   1248    1.1      fvdl {
   1249   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1250   1.21   thorpej 	int s, rv = 0;
   1251    1.1      fvdl 
   1252  1.114        ad 	s = splhigh();
   1253   1.21   thorpej 
   1254   1.21   thorpej 	/*
   1255   1.21   thorpej 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1256   1.21   thorpej 	 * don't take any action.
   1257   1.21   thorpej 	 */
   1258   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1259   1.27   thorpej 	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
   1260   1.88     blymn 		if (alp->lock_holder == cpu_num)
   1261   1.21   thorpej 			SLOCK_WHERE("simple_lock_try: locking against myself\n",
   1262   1.26  sommerfe 			    alp, id, l);
   1263   1.21   thorpej 		goto out;
   1264   1.21   thorpej 	}
   1265   1.21   thorpej #else
   1266   1.27   thorpej 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1267   1.21   thorpej 		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
   1268   1.21   thorpej 		goto out;
   1269   1.18       chs 	}
   1270   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1271   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
   1272   1.21   thorpej 
   1273   1.21   thorpej 	/*
   1274   1.21   thorpej 	 * At this point, we have acquired the lock.
   1275   1.21   thorpej 	 */
   1276   1.21   thorpej 
   1277   1.21   thorpej 	rv = 1;
   1278   1.18       chs 
   1279    1.5       chs 	alp->lock_file = id;
   1280    1.5       chs 	alp->lock_line = l;
   1281   1.88     blymn 	alp->lock_holder = cpu_num;
   1282   1.21   thorpej 
   1283   1.21   thorpej 	SLOCK_LIST_LOCK();
   1284   1.87  christos 	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
   1285   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1286   1.21   thorpej 
   1287   1.21   thorpej 	SLOCK_COUNT(1);
   1288   1.21   thorpej 
   1289   1.21   thorpej  out:
   1290   1.12       chs 	splx(s);
   1291   1.21   thorpej 	return (rv);
   1292    1.1      fvdl }
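
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c): the
 * try-lock pattern served by simple_lock_try().  The softc structure
 * reuses the hypothetical example_softc from the sketch above.
 */
#if 0	/* example only */
static int
example_try_bump(struct example_softc *sc)
{

	if (simple_lock_try(&sc->sc_slock) == 0)
		return (EBUSY);		/* held elsewhere; caller backs off */
	sc->sc_count++;
	simple_unlock(&sc->sc_slock);
	return (0);
}
#endif	/* example only */
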
   1293    1.1      fvdl 
   1294    1.1      fvdl void
   1295   1.91     perry _simple_unlock(volatile struct simplelock *alp, const char *id, int l)
   1296    1.1      fvdl {
   1297   1.12       chs 	int s;
   1298    1.1      fvdl 
   1299  1.114        ad 	s = splhigh();
   1300   1.21   thorpej 
   1301   1.21   thorpej 	/*
   1302   1.21   thorpej 	 * MULTIPROCESSOR case: This is `safe' because we think we hold
   1303   1.21   thorpej 	 * the lock, and if we don't, we don't take any action.
   1304   1.21   thorpej 	 */
   1305   1.27   thorpej 	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
   1306   1.21   thorpej 		SLOCK_WHERE("simple_unlock: lock not held\n",
   1307   1.21   thorpej 		    alp, id, l);
   1308   1.21   thorpej 		goto out;
   1309   1.21   thorpej 	}
   1310   1.21   thorpej 
   1311   1.21   thorpej 	SLOCK_LIST_LOCK();
   1312   1.21   thorpej 	TAILQ_REMOVE(&simplelock_list, alp, list);
   1313   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1314   1.21   thorpej 
   1315   1.21   thorpej 	SLOCK_COUNT(-1);
   1316   1.21   thorpej 
   1317   1.21   thorpej 	alp->list.tqe_next = NULL;	/* sanity */
   1318   1.21   thorpej 	alp->list.tqe_prev = NULL;	/* sanity */
   1319   1.21   thorpej 
   1320    1.5       chs 	alp->unlock_file = id;
   1321    1.5       chs 	alp->unlock_line = l;
   1322   1.21   thorpej 
   1323   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1324   1.26  sommerfe 	alp->lock_holder = LK_NOCPU;
   1325   1.21   thorpej 	/* Now that we've modified all fields, release the lock. */
   1326   1.27   thorpej 	__cpu_simple_unlock(&alp->lock_data);
   1327   1.21   thorpej #else
   1328   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1329   1.41   thorpej 	KASSERT(alp->lock_holder == cpu_number());
   1330   1.41   thorpej 	alp->lock_holder = LK_NOCPU;
   1331   1.21   thorpej #endif /* } */
   1332   1.21   thorpej 
   1333   1.21   thorpej  out:
   1334   1.18       chs 	splx(s);
   1335   1.12       chs }
   1336   1.12       chs 
   1337   1.12       chs void
   1338   1.33   thorpej simple_lock_dump(void)
   1339   1.12       chs {
   1340   1.91     perry 	volatile struct simplelock *alp;
   1341   1.12       chs 	int s;
   1342   1.12       chs 
   1343  1.114        ad 	s = splhigh();
   1344   1.21   thorpej 	SLOCK_LIST_LOCK();
   1345   1.25   thorpej 	lock_printf("all simple locks:\n");
   1346   1.58       chs 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1347   1.25   thorpej 		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
   1348   1.21   thorpej 		    alp->lock_file, alp->lock_line);
   1349   1.12       chs 	}
   1350   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1351   1.12       chs 	splx(s);
   1352   1.12       chs }
   1353   1.12       chs 
   1354   1.12       chs void
   1355   1.33   thorpej simple_lock_freecheck(void *start, void *end)
   1356   1.12       chs {
   1357   1.91     perry 	volatile struct simplelock *alp;
   1358   1.12       chs 	int s;
   1359   1.12       chs 
   1360  1.114        ad 	s = splhigh();
   1361   1.21   thorpej 	SLOCK_LIST_LOCK();
   1362   1.58       chs 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1363   1.91     perry 		if ((volatile void *)alp >= start &&
   1364   1.91     perry 		    (volatile void *)alp < end) {
   1365   1.25   thorpej 			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
   1366   1.34   thorpej 			    alp, alp->lock_holder, alp->lock_file,
   1367   1.34   thorpej 			    alp->lock_line);
   1368   1.34   thorpej 			SLOCK_DEBUGGER();
   1369   1.34   thorpej 		}
   1370   1.34   thorpej 	}
   1371   1.34   thorpej 	SLOCK_LIST_UNLOCK();
   1372   1.34   thorpej 	splx(s);
   1373   1.34   thorpej }
   1374   1.34   thorpej 
   1375   1.55   thorpej /*
   1376  1.113      yamt  * We must be holding exactly one lock: the spc_lock.
   1377   1.55   thorpej  */
   1378   1.55   thorpej 
   1379   1.34   thorpej void
   1380   1.34   thorpej simple_lock_switchcheck(void)
   1381   1.34   thorpej {
   1382   1.55   thorpej 
   1383  1.105        ad 	simple_lock_only_held(NULL, "switching");
   1384   1.55   thorpej }
   1385   1.55   thorpej 
   1386   1.93       erh /*
   1387   1.93       erh  * Drop into the debugger if lp isn't the only lock held.
   1388   1.93       erh  * lp may be NULL.
   1389   1.93       erh  */
   1390   1.55   thorpej void
   1391   1.55   thorpej simple_lock_only_held(volatile struct simplelock *lp, const char *where)
   1392   1.55   thorpej {
   1393   1.91     perry 	volatile struct simplelock *alp;
   1394   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1395   1.34   thorpej 	int s;
   1396   1.34   thorpej 
   1397   1.55   thorpej 	if (lp) {
   1398   1.55   thorpej 		LOCK_ASSERT(simple_lock_held(lp));
   1399   1.55   thorpej 	}
   1400  1.114        ad 	s = splhigh();
   1401   1.34   thorpej 	SLOCK_LIST_LOCK();
   1402   1.58       chs 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1403   1.55   thorpej 		if (alp == lp)
   1404   1.42   thorpej 			continue;
   1405   1.88     blymn 		if (alp->lock_holder == cpu_num)
   1406   1.55   thorpej 			break;
   1407   1.12       chs 	}
   1408   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1409   1.12       chs 	splx(s);
   1410   1.55   thorpej 
   1411   1.55   thorpej 	if (alp != NULL) {
   1412   1.58       chs 		lock_printf("\n%s with held simple_lock %p "
   1413   1.55   thorpej 		    "CPU %lu %s:%d\n",
   1414   1.55   thorpej 		    where, alp, alp->lock_holder, alp->lock_file,
   1415   1.55   thorpej 		    alp->lock_line);
   1416   1.58       chs 		SLOCK_TRACE();
   1417   1.55   thorpej 		SLOCK_DEBUGGER();
   1418   1.55   thorpej 	}
   1419    1.1      fvdl }
   1420   1.94       erh 
   1421   1.94       erh /*
   1422   1.94       erh  * Set to 1 by simple_lock_assert_*().
   1423   1.94       erh  * Can be cleared from ddb to avoid a panic.
   1424   1.94       erh  */
   1425   1.94       erh int slock_assert_will_panic;
   1426   1.94       erh 
   1427   1.94       erh /*
   1428   1.94       erh  * If the lock isn't held, print a traceback, optionally drop into the
   1429   1.94       erh  *  debugger, then panic.
    1430   1.94       erh  * The panic can be avoided by clearing slock_assert_will_panic from the
   1431   1.94       erh  *  debugger.
   1432   1.94       erh  */
   1433   1.94       erh void
   1434   1.94       erh _simple_lock_assert_locked(volatile struct simplelock *alp,
   1435   1.94       erh     const char *lockname, const char *id, int l)
   1436   1.94       erh {
   1437   1.94       erh 	if (simple_lock_held(alp) == 0) {
   1438   1.94       erh 		slock_assert_will_panic = 1;
   1439   1.94       erh 		lock_printf("%s lock not held\n", lockname);
   1440   1.94       erh 		SLOCK_WHERE("lock not held", alp, id, l);
   1441   1.94       erh 		if (slock_assert_will_panic)
   1442   1.94       erh 			panic("%s: not locked", lockname);
   1443   1.94       erh 	}
   1444   1.94       erh }
   1445   1.94       erh 
   1446   1.94       erh void
   1447   1.94       erh _simple_lock_assert_unlocked(volatile struct simplelock *alp,
   1448   1.94       erh     const char *lockname, const char *id, int l)
   1449   1.94       erh {
   1450   1.94       erh 	if (simple_lock_held(alp)) {
   1451   1.94       erh 		slock_assert_will_panic = 1;
   1452   1.94       erh 		lock_printf("%s lock held\n", lockname);
   1453   1.94       erh 		SLOCK_WHERE("lock held", alp, id, l);
   1454   1.94       erh 		if (slock_assert_will_panic)
   1455   1.94       erh 			panic("%s: locked", lockname);
   1456   1.94       erh 	}
   1457   1.94       erh }
   1458   1.94       erh 
   1459   1.96      yamt void
   1460   1.96      yamt assert_sleepable(struct simplelock *interlock, const char *msg)
   1461   1.96      yamt {
   1462   1.96      yamt 
   1463  1.113      yamt 	if (CURCPU_IDLE_P()) {
   1464  1.113      yamt 		panic("assert_sleepable: idle");
   1465   1.97      yamt 	}
   1466   1.96      yamt 	simple_lock_only_held(interlock, msg);
   1467   1.96      yamt }
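
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c):
 * calling assert_sleepable() before an operation that may sleep.  The
 * helper name and the use of kernel malloc() here are assumptions made
 * for the example only.
 */
#if 0	/* example only */
static void *
example_alloc_may_sleep(struct simplelock *interlock)
{

	/* Complains if called from the idle LWP or with stray locks held. */
	assert_sleepable(interlock, "example_alloc_may_sleep");
	return (malloc(128, M_TEMP, M_WAITOK));
}
#endif	/* example only */
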
   1468   1.96      yamt 
   1469   1.21   thorpej #endif /* LOCKDEBUG */ /* } */
   1470   1.62   thorpej 
   1471   1.62   thorpej #if defined(MULTIPROCESSOR)
   1472  1.105        ad 
   1473   1.62   thorpej /*
   1474   1.62   thorpej  * Functions for manipulating the kernel_lock.  We put them here
   1475   1.62   thorpej  * so that they show up in profiles.
   1476   1.62   thorpej  */
   1477   1.62   thorpej 
   1478  1.105        ad #define	_KERNEL_LOCK_ABORT(msg)						\
   1479  1.105        ad     LOCKDEBUG_ABORT(kernel_lock_id, &kernel_lock, &_kernel_lock_ops,	\
   1480  1.105        ad         __FUNCTION__, msg)
   1481  1.105        ad 
   1482  1.105        ad #ifdef LOCKDEBUG
   1483  1.105        ad #define	_KERNEL_LOCK_ASSERT(cond)					\
   1484  1.105        ad do {									\
   1485  1.105        ad 	if (!(cond))							\
   1486  1.105        ad 		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
   1487  1.105        ad } while (/* CONSTCOND */ 0)
   1488  1.105        ad #else
   1489  1.105        ad #define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
   1490  1.105        ad #endif
   1491  1.105        ad 
   1492  1.105        ad void	_kernel_lock_dump(volatile void *);
   1493  1.105        ad 
   1494  1.105        ad lockops_t _kernel_lock_ops = {
   1495  1.105        ad 	"Kernel lock",
   1496  1.105        ad 	0,
   1497  1.105        ad 	_kernel_lock_dump
   1498  1.105        ad };
   1499  1.105        ad 
   1500   1.85      yamt /*
   1501  1.105        ad  * Initialize the kernel lock.
   1502   1.85      yamt  */
   1503   1.62   thorpej void
   1504   1.62   thorpej _kernel_lock_init(void)
   1505   1.62   thorpej {
   1506   1.62   thorpej 
   1507  1.105        ad 	__cpu_simple_lock_init(&kernel_lock);
   1508  1.105        ad 	kernel_lock_id = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops);
   1509   1.62   thorpej }
   1510   1.62   thorpej 
   1511   1.62   thorpej /*
   1512  1.105        ad  * Print debugging information about the kernel lock.
   1513   1.62   thorpej  */
   1514   1.62   thorpej void
   1515  1.105        ad _kernel_lock_dump(volatile void *junk)
   1516   1.62   thorpej {
   1517   1.85      yamt 	struct cpu_info *ci = curcpu();
   1518   1.62   thorpej 
   1519  1.105        ad 	(void)junk;
   1520   1.85      yamt 
   1521  1.105        ad 	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
   1522  1.105        ad 	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
   1523   1.62   thorpej }
   1524   1.62   thorpej 
   1525  1.105        ad /*
   1526  1.105        ad  * Acquire 'nlocks' holds on the kernel lock.  If 'l' is non-null, the
   1527  1.105        ad  * acquisition is from process context.
   1528  1.105        ad  */
   1529   1.62   thorpej void
   1530  1.105        ad _kernel_lock(int nlocks, struct lwp *l)
   1531   1.62   thorpej {
   1532   1.85      yamt 	struct cpu_info *ci = curcpu();
   1533  1.105        ad 	LOCKSTAT_TIMER(spintime);
   1534  1.105        ad 	LOCKSTAT_FLAG(lsflag);
   1535  1.105        ad 	struct lwp *owant;
   1536  1.105        ad #ifdef LOCKDEBUG
   1537  1.105        ad 	u_int spins;
   1538  1.105        ad #endif
   1539   1.85      yamt 	int s;
   1540   1.85      yamt 
   1541  1.105        ad 	(void)l;
   1542  1.105        ad 
   1543  1.105        ad 	if (nlocks == 0)
   1544  1.105        ad 		return;
   1545  1.105        ad 	_KERNEL_LOCK_ASSERT(nlocks > 0);
   1546   1.62   thorpej 
   1547  1.115        ad 	s = splsched();	/* XXX splvm() */
   1548  1.105        ad 
   1549  1.105        ad 	if (ci->ci_biglock_count != 0) {
   1550  1.105        ad 		_KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
   1551  1.105        ad 		ci->ci_biglock_count += nlocks;
   1552  1.105        ad 		splx(s);
   1553  1.105        ad 		return;
   1554  1.105        ad 	}
   1555  1.105        ad 
   1556  1.107        ad 	LOCKDEBUG_WANTLOCK(kernel_lock_id,
   1557  1.107        ad 	    (uintptr_t)__builtin_return_address(0), 0);
   1558  1.107        ad 
   1559  1.105        ad 	if (__cpu_simple_lock_try(&kernel_lock)) {
   1560  1.105        ad 		ci->ci_biglock_count = nlocks;
   1561  1.105        ad 		LOCKDEBUG_LOCKED(kernel_lock_id,
   1562  1.105        ad 		    (uintptr_t)__builtin_return_address(0), 0);
   1563  1.105        ad 		splx(s);
   1564  1.105        ad 		return;
   1565  1.105        ad 	}
   1566  1.105        ad 
   1567  1.105        ad 	LOCKSTAT_ENTER(lsflag);
   1568  1.105        ad 	LOCKSTAT_START_TIMER(lsflag, spintime);
   1569  1.105        ad 
   1570  1.105        ad 	/*
   1571  1.105        ad 	 * Before setting ci_biglock_wanted we must post a store
   1572  1.105        ad 	 * fence (see kern_mutex.c).  This is accomplished by the
   1573  1.105        ad 	 * __cpu_simple_lock_try() above.
   1574  1.105        ad 	 */
   1575  1.105        ad 	owant = ci->ci_biglock_wanted;
   1576  1.105        ad 	ci->ci_biglock_wanted = curlwp;	/* XXXAD */
   1577  1.105        ad 
   1578  1.105        ad #ifdef LOCKDEBUG
   1579  1.105        ad 	spins = 0;
   1580  1.105        ad #endif
   1581  1.105        ad 
   1582  1.105        ad 	do {
   1583  1.105        ad 		while (kernel_lock == __SIMPLELOCK_LOCKED) {
   1584  1.105        ad #ifdef LOCKDEBUG
   1585  1.105        ad 			if (SPINLOCK_SPINOUT(spins))
   1586  1.105        ad 				_KERNEL_LOCK_ABORT("spinout");
   1587  1.105        ad #endif
   1588  1.105        ad 			splx(s);
   1589  1.105        ad 			SPINLOCK_SPIN_HOOK;
   1590  1.115        ad 			(void)splsched();	/* XXX splvm() */
   1591  1.105        ad 		}
   1592  1.105        ad 	} while (!__cpu_simple_lock_try(&kernel_lock));
   1593  1.105        ad 
   1594  1.105        ad 	ci->ci_biglock_wanted = owant;
   1595  1.105        ad 	ci->ci_biglock_count += nlocks;
   1596  1.107        ad 	LOCKSTAT_STOP_TIMER(lsflag, spintime);
   1597  1.107        ad 	LOCKDEBUG_LOCKED(kernel_lock_id,
   1598  1.107        ad 	    (uintptr_t)__builtin_return_address(0), 0);
   1599   1.85      yamt 	splx(s);
   1600  1.105        ad 
   1601  1.105        ad 	/*
   1602  1.105        ad 	 * Again, another store fence is required (see kern_mutex.c).
   1603  1.105        ad 	 */
   1604  1.105        ad 	mb_write();
   1605  1.107        ad 	if (owant == NULL) {
   1606  1.107        ad 		LOCKSTAT_EVENT(lsflag, &kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
   1607  1.107        ad 		    1, spintime);
   1608  1.107        ad 	}
   1609  1.105        ad 	LOCKSTAT_EXIT(lsflag);
   1610   1.62   thorpej }
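
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c):
 * taking and dropping a single hold on the big lock around a stretch of
 * non-MP-safe code.  KERNEL_LOCK()/KERNEL_UNLOCK_ONE() are assumed to
 * be the <sys/lock.h> wrappers around _kernel_lock()/_kernel_unlock()
 * in this era; the helper name is an assumption for the example only.
 */
#if 0	/* example only */
static void
example_non_mpsafe_section(struct lwp *l)
{

	KERNEL_LOCK(1, l);		/* one hold, from process context */
	/* ... code that still relies on the big lock ... */
	KERNEL_UNLOCK_ONE(l);
}
#endif	/* example only */
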
   1611   1.62   thorpej 
   1612   1.62   thorpej /*
    1613  1.105        ad  * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
    1614  1.105        ad  * all holds; if -1, release the single hold.  A non-null 'l' means process context.
   1615   1.62   thorpej  */
   1616   1.62   thorpej void
   1617  1.105        ad _kernel_unlock(int nlocks, struct lwp *l, int *countp)
   1618   1.62   thorpej {
   1619  1.105        ad 	struct cpu_info *ci = curcpu();
   1620  1.105        ad 	u_int olocks;
   1621  1.105        ad 	int s;
   1622   1.62   thorpej 
   1623  1.105        ad 	(void)l;
   1624   1.62   thorpej 
   1625  1.105        ad 	_KERNEL_LOCK_ASSERT(nlocks < 2);
   1626   1.62   thorpej 
   1627  1.105        ad 	olocks = ci->ci_biglock_count;
   1628   1.77      yamt 
   1629  1.105        ad 	if (olocks == 0) {
   1630  1.105        ad 		_KERNEL_LOCK_ASSERT(nlocks <= 0);
   1631  1.105        ad 		if (countp != NULL)
   1632  1.105        ad 			*countp = 0;
   1633  1.105        ad 		return;
   1634  1.105        ad 	}
   1635   1.77      yamt 
   1636  1.105        ad 	_KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
   1637   1.85      yamt 
   1638  1.105        ad 	if (nlocks == 0)
   1639  1.105        ad 		nlocks = olocks;
   1640  1.105        ad 	else if (nlocks == -1) {
   1641  1.105        ad 		nlocks = 1;
   1642  1.105        ad 		_KERNEL_LOCK_ASSERT(olocks == 1);
   1643  1.105        ad 	}
   1644   1.85      yamt 
   1645  1.115        ad 	s = splsched();	/* XXX splvm() */
   1646  1.105        ad 	if ((ci->ci_biglock_count -= nlocks) == 0) {
   1647  1.105        ad 		LOCKDEBUG_UNLOCKED(kernel_lock_id,
   1648  1.105        ad 		    (uintptr_t)__builtin_return_address(0), 0);
   1649  1.105        ad 		__cpu_simple_unlock(&kernel_lock);
   1650   1.85      yamt 	}
   1651  1.105        ad 	splx(s);
   1652   1.77      yamt 
   1653  1.105        ad 	if (countp != NULL)
   1654  1.105        ad 		*countp = olocks;
   1655   1.77      yamt }
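
/*
 * Illustrative sketch (editor's example, not part of kern_lock.c):
 * dropping every hold on the kernel lock before blocking and restoring
 * them afterwards, mirroring the 'nlocks == 0, countp != NULL' case
 * above.  KERNEL_UNLOCK_ALL()/KERNEL_LOCK() are assumed to be the
 * <sys/lock.h> wrappers of this era; the helper name is an assumption
 * for the example only.
 */
#if 0	/* example only */
static void
example_block_without_biglock(struct lwp *l)
{
	int holds;

	KERNEL_UNLOCK_ALL(l, &holds);	/* drop all holds, remember how many */
	/* ... sleep or spin without the big lock held ... */
	KERNEL_LOCK(holds, l);		/* restore the original hold count */
}
#endif	/* example only */
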
   1656   1.77      yamt 
   1657   1.84      yamt #if defined(DEBUG)
   1658  1.105        ad /*
   1659  1.105        ad  * Assert that the kernel lock is held.
   1660  1.105        ad  */
   1661   1.84      yamt void
   1662  1.105        ad _kernel_lock_assert_locked(void)
   1663   1.84      yamt {
   1664  1.100      yamt 
   1665  1.105        ad 	if (kernel_lock != __SIMPLELOCK_LOCKED ||
   1666  1.105        ad 	    curcpu()->ci_biglock_count == 0)
   1667  1.105        ad 		_KERNEL_LOCK_ABORT("not locked");
   1668   1.84      yamt }
   1669  1.100      yamt 
   1670  1.100      yamt void
   1671  1.100      yamt _kernel_lock_assert_unlocked()
   1672  1.100      yamt {
   1673  1.100      yamt 
   1674  1.105        ad 	if (curcpu()->ci_biglock_count != 0)
   1675  1.105        ad 		_KERNEL_LOCK_ABORT("locked");
   1676  1.100      yamt }
   1677   1.84      yamt #endif
   1678   1.94       erh 
    1679  1.105        ad #endif	/* MULTIPROCESSOR */
   1680