/*	$NetBSD: kern_lock.c,v 1.118 2007/07/29 12:40:37 pooka Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.118 2007/07/29 12:40:37 pooka Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>

#include <machine/cpu.h>
#include <machine/stdarg.h>

#include <dev/lockstat.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

static int acquire(volatile struct lock **, int *, int, int, int, uintptr_t);

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif /* defined(LOCKDEBUG) */

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, l, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(l)->l_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splhigh();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (/*CONSTCOND*/ 0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger && db_onpanic) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    true, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	if (db_onpanic) Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			lock_printf("held by CPU %lu\n",		\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			lock_printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			lock_printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SLOCK_TRACE();						\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG && DDB */

#define	RETURN_ADDRESS		((uintptr_t)__builtin_return_address(0))

/*
 * Acquire a resource.
 */
static int
acquire(volatile struct lock **lkpp, int *s, int extflags,
    int drain, int wanted, uintptr_t ra)
{
	int error;
	volatile struct lock *lkp = *lkpp;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	if (extflags & LK_SPIN) {
		int interlocked;

		SPINLOCK_SPINCHECK_DECL;

		if (!drain) {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		for (interlocked = 1;;) {
			SPINLOCK_SPINCHECK;
			if ((lkp->lk_flags & wanted) != 0) {
				if (interlocked) {
					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
					interlocked = 0;
				}
				SPINLOCK_SPIN_HOOK;
			} else if (interlocked) {
				break;
			} else {
				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
				interlocked = 1;
			}
		}
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		KASSERT((lkp->lk_flags & wanted) == 0);
		error = 0;	/* sanity */
	} else {
		LOCKSTAT_ENTER(lsflag);

		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
			if (drain)
				lkp->lk_flags |= LK_WAITDRAIN;
			else {
				lkp->lk_waitcount++;
				lkp->lk_flags |= LK_WAIT_NONZERO;
			}
			/* XXX Cast away volatile. */
			LOCKSTAT_START_TIMER(lsflag, slptime);
			error = ltsleep(drain ?
			    (volatile const void *)&lkp->lk_flags :
			    (volatile const void *)lkp, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
			LOCKSTAT_STOP_TIMER(lsflag, slptime);
			LOCKSTAT_EVENT_RA(lsflag, (void *)(uintptr_t)lkp,
			    LB_LOCKMGR | LB_SLEEP1, 1, slptime, ra);
			if (!drain) {
				lkp->lk_waitcount--;
				if (lkp->lk_waitcount == 0)
					lkp->lk_flags &= ~LK_WAIT_NONZERO;
			}
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
			if (lkp->lk_newlock != NULL) {
				simple_lock(&lkp->lk_newlock->lk_interlock);
				simple_unlock(&lkp->lk_interlock);
				if (lkp->lk_waitcount == 0)
					wakeup(&lkp->lk_newlock);
				*lkpp = lkp = lkp->lk_newlock;
			}
		}

		LOCKSTAT_EXIT(lsflag);
	}

	return error;
}

#define	SETHOLDER(lkp, pid, lid, cpu_id)				\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else {								\
		(lkp)->lk_lockholder = pid;				\
		(lkp)->lk_locklwp = lid;				\
	}								\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) :					\
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==		\
	    LK_WAIT_NONZERO) {						\
		wakeup((lkp));						\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

_TAILQ_HEAD(, struct lock, volatile) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int sp = splhigh();					\
		SPINLOCK_LIST_LOCK();					\
		TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list);	\
		SPINLOCK_LIST_UNLOCK();					\
		splx(sp);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int sp = splhigh();					\
		SPINLOCK_LIST_LOCK();					\
		TAILQ_REMOVE(&spinlock_list, (lkp), lk_list);		\
		SPINLOCK_LIST_UNLOCK();					\
		splx(sp);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

static void
lockpanic(volatile struct lock *lkp, const char *fmt, ...)
{
	char s[150], b[150];
#ifdef LOCKDEBUG
	static const char *locktype[] = {
	    "*0*", "shared", "exclusive", "upgrade", "exclupgrade",
	    "downgrade", "release", "drain", "exclother", "*9*",
	    "*10*", "*11*", "*12*", "*13*", "*14*", "*15*"
	};
#endif
	va_list ap;
	va_start(ap, fmt);
	vsnprintf(s, sizeof(s), fmt, ap);
	va_end(ap);
	bitmask_snprintf(lkp->lk_flags, __LK_FLAG_BITS, b, sizeof(b));
	panic("%s ("
#ifdef LOCKDEBUG
	    "type %s "
#endif
	    "flags %s, sharecount %d, exclusivecount %d, "
	    "recurselevel %d, waitcount %d, wmesg %s"
#ifdef LOCKDEBUG
	    ", lock_file %s, unlock_file %s, lock_line %d, unlock_line %d"
#endif
	    ")\n",
	    s,
#ifdef LOCKDEBUG
	    locktype[lkp->lk_flags & LK_TYPE_MASK],
#endif
	    b, lkp->lk_sharecount, lkp->lk_exclusivecount,
	    lkp->lk_recurselevel, lkp->lk_waitcount, lkp->lk_wmesg
#ifdef LOCKDEBUG
	    , lkp->lk_lock_file, lkp->lk_unlock_file, lkp->lk_lock_line,
	    lkp->lk_unlock_line
#endif
	);
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to);
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0);
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, pri_t prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_newlock = NULL;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

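#if 0
/*
 * Illustrative sketch (not compiled): setting up a lock with
 * lockinit() before first use.  The lock names and the PZERO wakeup
 * priority below are assumptions for the example, not taken from
 * this file.
 */
static struct lock example_sleep_lock;
static struct lock example_spin_lock;

static void
example_lock_setup(void)
{

	/* Sleep lock: waiters sleep at priority PZERO with no timeout. */
	lockinit(&example_sleep_lock, PZERO, "exslp", 0, 0);

	/* Spin lock: prio/timo are unused and wmesg is just a name. */
	lockinit(&example_spin_lock, 0, "exspn", 0, LK_SPIN);
}
#endif
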
/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0; /* XXX: gcc */
	int lock_type = 0;
	struct lwp *l = curlwp; /* XXX */
	pid_t pid;
	lwpid_t lid;
	cpuid_t cpu_num;

	if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
		cpu_num = cpu_number();
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		cpu_num = LK_NOCPU;
		pid = l->l_proc->p_pid;
		lid = l->l_lid;
	}

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0) {
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	else if (lkp->lk_flags & (LK_WANT_EXCL | LK_WANT_UPGRADE))
		lock_type = LK_EXCLOTHER;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

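#if 0
/*
 * Illustrative sketch (not compiled): interpreting lockstatus().
 * The helper name is an assumption for the example.  LK_EXCLUSIVE
 * means the calling LWP holds the lock exclusively, LK_SHARED means
 * it is held shared, LK_EXCLOTHER means someone else holds (or is
 * about to hold) it exclusively, and 0 means it is unlocked.
 */
static int
example_we_hold_exclusively(struct lock *lkp)
{

	return (lockstatus(lkp) == LK_EXCLUSIVE);
}
#endif
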
#if defined(LOCKDEBUG)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	if (panicstr != NULL)
		return;

	s = splhigh();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 */

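#if 0
/*
 * Illustrative sketch (not compiled) of the rule above: a raw
 * __cpu_simple_lock shared with interrupt handlers is always taken
 * at one fixed IPL, here splhigh().  The lock and counter are
 * assumptions for the example.
 */
static __cpu_simple_lock_t example_intr_lock = __SIMPLELOCK_UNLOCKED;
static int example_intr_events;

static void
example_intr_note_event(void)
{
	int s;

	s = splhigh();			/* raise IPL before spinning */
	__cpu_simple_lock(&example_intr_lock);
	example_intr_events++;		/* the protected update */
	__cpu_simple_unlock(&example_intr_lock);
	splx(s);			/* drop IPL only after release */
}
#endif
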
/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.   It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_num;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	/* LK_RETRY is for vn_lock, not for lockmgr. */
	KASSERT((flags & LK_RETRY) == 0);

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		lockpanic(lkp, "lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_num = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			lockpanic(lkp, "lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
			lockpanic(lkp, "lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			error = acquire(&lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE,
			    RETURN_ADDRESS);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
		    lkp->lk_exclusivecount == 0)
			lockpanic(lkp, "lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
			lockpanic(lkp, "lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		if (lkp->lk_sharecount == 0)
			lkp->lk_flags &= ~LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO,
			    RETURN_ADDRESS);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				WAKEUP_WAITER(lkp);
				break;
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				lockpanic(lkp, "lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			     lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					lockpanic(lkp, "lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		     LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO,
		    RETURN_ADDRESS);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			WAKEUP_WAITER(lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			lockpanic(lkp, "lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					lockpanic(lkp,
					    "lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_num, lkp->lk_cpu);
				} else {
					lockpanic(lkp, "lockmgr: pid %d.%d, not "
					    "exclusive lock holder %d.%d "
					    "unlocking", pid, lid,
					    lkp->lk_lockholder,
					    lkp->lk_locklwp);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_num, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
		}
#ifdef DIAGNOSTIC
		else
			lockpanic(lkp, "lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lockpanic(lkp, "lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		     LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(&lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO,
		    RETURN_ADDRESS);
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		if ((extflags & LK_RESURRECT) == 0)
			lkp->lk_flags |= LK_DRAINING;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		lockpanic(lkp, "lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	      LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup(&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		lockpanic(lkp, "lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}

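#if 0
/*
 * Illustrative sketch (not compiled): the common lockmgr() request
 * patterns described above.  The caller and error handling are
 * assumptions for the example.
 */
static void
example_lockmgr_usage(struct lock *lkp)
{

	/* Reader: take a shared lock, then release it. */
	(void) lockmgr(lkp, LK_SHARED, NULL);
	/* ... read the protected data ... */
	(void) lockmgr(lkp, LK_RELEASE, NULL);

	/* Writer: poll for an exclusive lock; EBUSY means it is held. */
	if (lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
		/* ... modify the protected data ... */
		(void) lockmgr(lkp, LK_RELEASE, NULL);
	}
}
#endif
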
/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_num;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_num = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
			lockpanic(lkp, "spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_num, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_num, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		lockpanic(lkp, "spinlock_release_all: release of shared lock!");
	else
		lockpanic(lkp, "spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock previously released with
 * spinlock_release_all(), reacquire it N times for the current CPU.
 * Intended for use in mi_switch() right after resuming execution.
 */

   1021   1.47  sommerfe void
   1022   1.50   thorpej #if defined(LOCKDEBUG)
   1023   1.91     perry _spinlock_acquire_count(volatile struct lock *lkp, int count,
   1024   1.50   thorpej     const char *file, int line)
   1025   1.50   thorpej #else
   1026   1.91     perry spinlock_acquire_count(volatile struct lock *lkp, int count)
   1027   1.50   thorpej #endif
   1028   1.47  sommerfe {
   1029   1.47  sommerfe 	int s, error;
   1030   1.88     blymn 	cpuid_t cpu_num;
   1031   1.86     perry 
   1032   1.47  sommerfe 	KASSERT(lkp->lk_flags & LK_SPIN);
   1033   1.86     perry 
   1034   1.47  sommerfe 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
   1035   1.47  sommerfe 
   1036   1.88     blymn 	cpu_num = cpu_number();
   1037   1.47  sommerfe 
   1038   1.47  sommerfe #ifdef DIAGNOSTIC
   1039   1.88     blymn 	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
   1040  1.110  christos 		lockpanic(lkp, "spinlock_acquire_count: processor %lu already holds lock", (long)cpu_num);
   1041   1.47  sommerfe #endif
   1042   1.47  sommerfe 	/*
   1043   1.47  sommerfe 	 * Try to acquire the want_exclusive flag.
   1044   1.47  sommerfe 	 */
   1045   1.98        ad 	error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL,
   1046   1.98        ad 	    RETURN_ADDRESS);
   1047   1.47  sommerfe 	lkp->lk_flags |= LK_WANT_EXCL;
   1048   1.47  sommerfe 	/*
   1049   1.47  sommerfe 	 * Wait for shared locks and upgrades to finish.
   1050   1.47  sommerfe 	 */
   1051   1.78   hannken 	error = acquire(&lkp, &s, LK_SPIN, 0,
   1052   1.98        ad 	    LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE,
   1053   1.98        ad 	    RETURN_ADDRESS);
   1054   1.47  sommerfe 	lkp->lk_flags &= ~LK_WANT_EXCL;
   1055   1.47  sommerfe 	lkp->lk_flags |= LK_HAVE_EXCL;
   1056   1.88     blymn 	SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
   1057   1.50   thorpej #if defined(LOCKDEBUG)
   1058   1.50   thorpej 	lkp->lk_lock_file = file;
   1059   1.50   thorpej 	lkp->lk_lock_line = line;
   1060   1.50   thorpej #endif
   1061   1.47  sommerfe 	HAVEIT(lkp);
   1062   1.47  sommerfe 	if (lkp->lk_exclusivecount != 0)
   1063  1.110  christos 		lockpanic(lkp, "lockmgr: non-zero exclusive count");
   1064   1.47  sommerfe 	lkp->lk_exclusivecount = count;
   1065   1.47  sommerfe 	lkp->lk_recurselevel = 1;
   1066   1.88     blymn 	COUNT_CPU(cpu_num, count);
   1067   1.47  sommerfe 
   1068   1.86     perry 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
   1069   1.47  sommerfe }
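
/*
 * Example (sketch): the intended pairing of spinlock_release_all() and
 * spinlock_acquire_count() around a context switch, per the comments
 * above.  "sched_lock" is a hypothetical lock name used only here.
 */
#if 0
	volatile struct lock sched_lock;	/* hypothetical */
	int count;

	/* Before switching away: drop every hold, remembering how many. */
	count = spinlock_release_all(&sched_lock);

	/* ... cpu_switch() runs; execution eventually resumes here ... */

	/* After resuming: restore the same number of holds. */
	spinlock_acquire_count(&sched_lock, count);
#endif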
   1070   1.47  sommerfe 
   1073   1.47  sommerfe /*
    1074    1.1      fvdl  * Print out information about the state of a lock. Used by VOP_PRINT
    1075    1.1      fvdl  * routines to display the status of contained locks.
   1076    1.1      fvdl  */
   1077    1.2      fvdl void
   1078   1.91     perry lockmgr_printinfo(volatile struct lock *lkp)
   1079    1.1      fvdl {
   1080    1.1      fvdl 
   1081    1.1      fvdl 	if (lkp->lk_sharecount)
   1082    1.1      fvdl 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
   1083    1.1      fvdl 		    lkp->lk_sharecount);
   1084   1.19   thorpej 	else if (lkp->lk_flags & LK_HAVE_EXCL) {
   1085   1.19   thorpej 		printf(" lock type %s: EXCL (count %d) by ",
   1086   1.19   thorpej 		    lkp->lk_wmesg, lkp->lk_exclusivecount);
   1087   1.19   thorpej 		if (lkp->lk_flags & LK_SPIN)
   1088   1.19   thorpej 			printf("processor %lu", lkp->lk_cpu);
   1089   1.19   thorpej 		else
   1090   1.69   thorpej 			printf("pid %d.%d", lkp->lk_lockholder,
   1091   1.69   thorpej 			    lkp->lk_locklwp);
   1092   1.19   thorpej 	} else
   1093   1.19   thorpej 		printf(" not locked");
   1094   1.19   thorpej 	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
   1095    1.1      fvdl 		printf(" with %d pending", lkp->lk_waitcount);
   1096    1.1      fvdl }
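
/*
 * Example (sketch): a VOP_PRINT routine would typically emit its own
 * identification and then call lockmgr_printinfo() for the embedded
 * lock.  "xp" and "x_lock" are hypothetical names.
 */
#if 0
	printf("tag VT_X, xnode %p", xp);
	lockmgr_printinfo(&xp->x_lock);
	printf("\n");
#endif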
   1097    1.1      fvdl 
   1098   1.21   thorpej #if defined(LOCKDEBUG) /* { */
   1099   1.91     perry _TAILQ_HEAD(, struct simplelock, volatile) simplelock_list =
   1100   1.21   thorpej     TAILQ_HEAD_INITIALIZER(simplelock_list);
   1101   1.21   thorpej 
   1102   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1103   1.21   thorpej struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
   1104   1.21   thorpej 
   1105   1.21   thorpej #define	SLOCK_LIST_LOCK()						\
   1106   1.29  sommerfe 	__cpu_simple_lock(&simplelock_list_slock.lock_data)
   1107   1.21   thorpej 
   1108   1.21   thorpej #define	SLOCK_LIST_UNLOCK()						\
   1109   1.29  sommerfe 	__cpu_simple_unlock(&simplelock_list_slock.lock_data)
   1110   1.21   thorpej 
   1111   1.21   thorpej #define	SLOCK_COUNT(x)							\
   1112   1.47  sommerfe 	curcpu()->ci_simple_locks += (x)
   1113   1.21   thorpej #else
   1114   1.21   thorpej u_long simple_locks;
   1115   1.21   thorpej 
   1116   1.21   thorpej #define	SLOCK_LIST_LOCK()	/* nothing */
   1117   1.21   thorpej 
   1118   1.21   thorpej #define	SLOCK_LIST_UNLOCK()	/* nothing */
   1119   1.21   thorpej 
   1120   1.21   thorpej #define	SLOCK_COUNT(x)		simple_locks += (x)
   1121   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
   1122   1.21   thorpej 
   1123   1.26  sommerfe #ifdef MULTIPROCESSOR
   1124   1.75       wiz #define SLOCK_MP()		lock_printf("on CPU %ld\n", 		\
   1125   1.46   thorpej 				    (u_long) cpu_number())
   1126   1.26  sommerfe #else
   1127   1.26  sommerfe #define SLOCK_MP()		/* nothing */
   1128   1.26  sommerfe #endif
   1129   1.26  sommerfe 
   1130   1.21   thorpej #define	SLOCK_WHERE(str, alp, id, l)					\
   1131   1.21   thorpej do {									\
   1132   1.58       chs 	lock_printf("\n");						\
   1133   1.25   thorpej 	lock_printf(str);						\
   1134   1.33   thorpej 	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
   1135   1.26  sommerfe 	SLOCK_MP();							\
   1136   1.21   thorpej 	if ((alp)->lock_file != NULL)					\
   1137   1.25   thorpej 		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
   1138   1.21   thorpej 		    (alp)->lock_line);					\
   1139   1.21   thorpej 	if ((alp)->unlock_file != NULL)					\
   1140   1.25   thorpej 		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
   1141   1.21   thorpej 		    (alp)->unlock_line);				\
   1142   1.58       chs 	SLOCK_TRACE()							\
   1143   1.21   thorpej 	SLOCK_DEBUGGER();						\
   1144   1.30   thorpej } while (/*CONSTCOND*/0)
   1145   1.12       chs 
   1146    1.1      fvdl /*
   1147    1.1      fvdl  * Simple lock functions so that the debugger can see from whence
   1148    1.1      fvdl  * they are being called.
   1149    1.1      fvdl  */
   1150    1.1      fvdl void
   1151   1.91     perry simple_lock_init(volatile struct simplelock *alp)
   1152    1.1      fvdl {
   1153   1.21   thorpej 
   1154   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1155   1.27   thorpej 	__cpu_simple_lock_init(&alp->lock_data);
   1156   1.21   thorpej #else
   1157   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1158   1.21   thorpej #endif /* } */
   1159    1.5       chs 	alp->lock_file = NULL;
   1160    1.5       chs 	alp->lock_line = 0;
   1161    1.5       chs 	alp->unlock_file = NULL;
   1162    1.5       chs 	alp->unlock_line = 0;
   1163   1.41   thorpej 	alp->lock_holder = LK_NOCPU;
   1164    1.1      fvdl }
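
/*
 * Example (sketch): the two usual ways to set up a simplelock.
 * "foo_slock" and "sc" are hypothetical names.
 */
#if 0
	/* Static initialization: */
	struct simplelock foo_slock = SIMPLELOCK_INITIALIZER;

	/* Run-time initialization, e.g. for a lock embedded in a softc: */
	simple_lock_init(&sc->sc_slock);
#endif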
   1165    1.1      fvdl 
   1166    1.1      fvdl void
   1167   1.91     perry _simple_lock(volatile struct simplelock *alp, const char *id, int l)
   1168    1.1      fvdl {
   1169   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1170   1.12       chs 	int s;
   1171   1.12       chs 
   1172  1.114        ad 	s = splhigh();
   1173   1.21   thorpej 
   1174   1.21   thorpej 	/*
   1175   1.21   thorpej 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1176   1.21   thorpej 	 * don't take any action, and just fall into the normal spin case.
   1177   1.21   thorpej 	 */
   1178   1.27   thorpej 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1179   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1180   1.88     blymn 		if (alp->lock_holder == cpu_num) {
   1181   1.21   thorpej 			SLOCK_WHERE("simple_lock: locking against myself\n",
   1182   1.21   thorpej 			    alp, id, l);
   1183   1.21   thorpej 			goto out;
   1184    1.1      fvdl 		}
   1185   1.21   thorpej #else
   1186   1.21   thorpej 		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
   1187   1.21   thorpej 		goto out;
   1188   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
   1189    1.1      fvdl 	}
   1190   1.21   thorpej 
   1191   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1192   1.21   thorpej 	/* Acquire the lock before modifying any fields. */
   1193   1.70        pk 	splx(s);
   1194   1.27   thorpej 	__cpu_simple_lock(&alp->lock_data);
   1195  1.114        ad 	s = splhigh();
   1196   1.21   thorpej #else
   1197   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1198   1.21   thorpej #endif /* } */
   1199   1.21   thorpej 
   1200   1.45  sommerfe 	if (alp->lock_holder != LK_NOCPU) {
   1201   1.45  sommerfe 		SLOCK_WHERE("simple_lock: uninitialized lock\n",
   1202   1.45  sommerfe 		    alp, id, l);
   1203   1.45  sommerfe 	}
   1204    1.5       chs 	alp->lock_file = id;
   1205    1.5       chs 	alp->lock_line = l;
   1206   1.88     blymn 	alp->lock_holder = cpu_num;
   1207   1.21   thorpej 
   1208   1.21   thorpej 	SLOCK_LIST_LOCK();
   1209   1.87  christos 	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
   1210   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1211   1.21   thorpej 
   1212   1.21   thorpej 	SLOCK_COUNT(1);
   1213   1.21   thorpej 
   1214   1.21   thorpej  out:
   1215   1.18       chs 	splx(s);
   1216   1.38   thorpej }
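
/*
 * Example (sketch): callers use the simple_lock() macro, which under
 * LOCKDEBUG passes __FILE__ and __LINE__ to _simple_lock() so the
 * acquisition site is recorded.  A typical critical section, with the
 * hypothetical lock "foo_slock":
 */
#if 0
	simple_lock(&foo_slock);
	/* ... modify the state that foo_slock protects ... */
	simple_unlock(&foo_slock);
#endif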
   1217   1.38   thorpej 
   1218   1.38   thorpej int
   1219   1.91     perry _simple_lock_held(volatile struct simplelock *alp)
   1220   1.38   thorpej {
   1221   1.54     enami #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
   1222   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1223   1.54     enami #endif
   1224   1.38   thorpej 	int s, locked = 0;
   1225   1.38   thorpej 
   1226  1.114        ad 	s = splhigh();
   1227   1.42   thorpej 
   1228   1.42   thorpej #if defined(MULTIPROCESSOR)
   1229   1.38   thorpej 	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
   1230   1.88     blymn 		locked = (alp->lock_holder == cpu_num);
   1231   1.38   thorpej 	else
   1232   1.38   thorpej 		__cpu_simple_unlock(&alp->lock_data);
   1233   1.38   thorpej #else
   1234   1.42   thorpej 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1235   1.42   thorpej 		locked = 1;
   1236   1.88     blymn 		KASSERT(alp->lock_holder == cpu_num);
   1237   1.42   thorpej 	}
   1238   1.42   thorpej #endif
   1239   1.38   thorpej 
   1240   1.38   thorpej 	splx(s);
   1241   1.42   thorpej 
   1242   1.38   thorpej 	return (locked);
   1243    1.1      fvdl }
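
/*
 * Example (sketch): simple_lock_held() is normally used in assertions
 * guarding code that requires the lock.  "foo_slock" is hypothetical.
 */
#if 0
	KASSERT(simple_lock_held(&foo_slock));
#endif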
   1244    1.1      fvdl 
   1245    1.1      fvdl int
   1246   1.91     perry _simple_lock_try(volatile struct simplelock *alp, const char *id, int l)
   1247    1.1      fvdl {
   1248   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1249   1.21   thorpej 	int s, rv = 0;
   1250    1.1      fvdl 
   1251  1.114        ad 	s = splhigh();
   1252   1.21   thorpej 
   1253   1.21   thorpej 	/*
   1254   1.21   thorpej 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1255   1.21   thorpej 	 * don't take any action.
   1256   1.21   thorpej 	 */
   1257   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1258   1.27   thorpej 	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
   1259   1.88     blymn 		if (alp->lock_holder == cpu_num)
   1260   1.21   thorpej 			SLOCK_WHERE("simple_lock_try: locking against myself\n",
   1261   1.26  sommerfe 			    alp, id, l);
   1262   1.21   thorpej 		goto out;
   1263   1.21   thorpej 	}
   1264   1.21   thorpej #else
   1265   1.27   thorpej 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1266   1.21   thorpej 		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
   1267   1.21   thorpej 		goto out;
   1268   1.18       chs 	}
   1269   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1270   1.21   thorpej #endif /* MULTIPROCESSOR */ /* } */
   1271   1.21   thorpej 
   1272   1.21   thorpej 	/*
   1273   1.21   thorpej 	 * At this point, we have acquired the lock.
   1274   1.21   thorpej 	 */
   1275   1.21   thorpej 
   1276   1.21   thorpej 	rv = 1;
   1277   1.18       chs 
   1278    1.5       chs 	alp->lock_file = id;
   1279    1.5       chs 	alp->lock_line = l;
   1280   1.88     blymn 	alp->lock_holder = cpu_num;
   1281   1.21   thorpej 
   1282   1.21   thorpej 	SLOCK_LIST_LOCK();
   1283   1.87  christos 	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
   1284   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1285   1.21   thorpej 
   1286   1.21   thorpej 	SLOCK_COUNT(1);
   1287   1.21   thorpej 
   1288   1.21   thorpej  out:
   1289   1.12       chs 	splx(s);
   1290   1.21   thorpej 	return (rv);
   1291    1.1      fvdl }
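
/*
 * Example (sketch): the non-blocking variant; a caller that must not
 * spin checks the return value and backs off on failure.  "foo_slock"
 * is hypothetical.
 */
#if 0
	if (simple_lock_try(&foo_slock) == 0)
		return (EBUSY);		/* could not get the lock */
	/* ... critical section ... */
	simple_unlock(&foo_slock);
#endif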
   1292    1.1      fvdl 
   1293    1.1      fvdl void
   1294   1.91     perry _simple_unlock(volatile struct simplelock *alp, const char *id, int l)
   1295    1.1      fvdl {
   1296   1.12       chs 	int s;
   1297    1.1      fvdl 
   1298  1.114        ad 	s = splhigh();
   1299   1.21   thorpej 
   1300   1.21   thorpej 	/*
   1301   1.21   thorpej 	 * MULTIPROCESSOR case: This is `safe' because we think we hold
   1302   1.21   thorpej 	 * the lock, and if we don't, we don't take any action.
   1303   1.21   thorpej 	 */
   1304   1.27   thorpej 	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
   1305   1.21   thorpej 		SLOCK_WHERE("simple_unlock: lock not held\n",
   1306   1.21   thorpej 		    alp, id, l);
   1307   1.21   thorpej 		goto out;
   1308   1.21   thorpej 	}
   1309   1.21   thorpej 
   1310   1.21   thorpej 	SLOCK_LIST_LOCK();
   1311   1.21   thorpej 	TAILQ_REMOVE(&simplelock_list, alp, list);
   1312   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1313   1.21   thorpej 
   1314   1.21   thorpej 	SLOCK_COUNT(-1);
   1315   1.21   thorpej 
   1316   1.21   thorpej 	alp->list.tqe_next = NULL;	/* sanity */
   1317   1.21   thorpej 	alp->list.tqe_prev = NULL;	/* sanity */
   1318   1.21   thorpej 
   1319    1.5       chs 	alp->unlock_file = id;
   1320    1.5       chs 	alp->unlock_line = l;
   1321   1.21   thorpej 
   1322   1.21   thorpej #if defined(MULTIPROCESSOR) /* { */
   1323   1.26  sommerfe 	alp->lock_holder = LK_NOCPU;
   1324   1.21   thorpej 	/* Now that we've modified all fields, release the lock. */
   1325   1.27   thorpej 	__cpu_simple_unlock(&alp->lock_data);
   1326   1.21   thorpej #else
   1327   1.27   thorpej 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1328   1.41   thorpej 	KASSERT(alp->lock_holder == cpu_number());
   1329   1.41   thorpej 	alp->lock_holder = LK_NOCPU;
   1330   1.21   thorpej #endif /* } */
   1331   1.21   thorpej 
   1332   1.21   thorpej  out:
   1333   1.18       chs 	splx(s);
   1334   1.12       chs }
   1335   1.12       chs 
   1336   1.12       chs void
   1337   1.33   thorpej simple_lock_dump(void)
   1338   1.12       chs {
   1339   1.91     perry 	volatile struct simplelock *alp;
   1340   1.12       chs 	int s;
   1341   1.12       chs 
   1342  1.114        ad 	s = splhigh();
   1343   1.21   thorpej 	SLOCK_LIST_LOCK();
   1344   1.25   thorpej 	lock_printf("all simple locks:\n");
   1345   1.58       chs 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1346   1.25   thorpej 		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
   1347   1.21   thorpej 		    alp->lock_file, alp->lock_line);
   1348   1.12       chs 	}
   1349   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1350   1.12       chs 	splx(s);
   1351   1.12       chs }
   1352   1.12       chs 
   1353   1.12       chs void
   1354   1.33   thorpej simple_lock_freecheck(void *start, void *end)
   1355   1.12       chs {
   1356   1.91     perry 	volatile struct simplelock *alp;
   1357   1.12       chs 	int s;
   1358   1.12       chs 
   1359  1.114        ad 	s = splhigh();
   1360   1.21   thorpej 	SLOCK_LIST_LOCK();
   1361   1.58       chs 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1362   1.91     perry 		if ((volatile void *)alp >= start &&
   1363   1.91     perry 		    (volatile void *)alp < end) {
   1364   1.25   thorpej 			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
   1365   1.34   thorpej 			    alp, alp->lock_holder, alp->lock_file,
   1366   1.34   thorpej 			    alp->lock_line);
   1367   1.34   thorpej 			SLOCK_DEBUGGER();
   1368   1.34   thorpej 		}
   1369   1.34   thorpej 	}
   1370   1.34   thorpej 	SLOCK_LIST_UNLOCK();
   1371   1.34   thorpej 	splx(s);
   1372   1.34   thorpej }
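
/*
 * Example (sketch): an allocator can call simple_lock_freecheck()
 * before recycling a memory range, so a lock freed while still on the
 * lock list is caught.  "obj" and "size" are hypothetical.
 */
#if 0
	simple_lock_freecheck(obj, (char *)obj + size);
	/* ... now safe, as far as LOCKDEBUG knows, to free obj ... */
#endif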
   1373   1.34   thorpej 
   1374   1.55   thorpej /*
   1375  1.113      yamt  * We must be holding exactly one lock: the spc_lock.
   1376   1.55   thorpej  */
   1377   1.55   thorpej 
   1378   1.34   thorpej void
   1379   1.34   thorpej simple_lock_switchcheck(void)
   1380   1.34   thorpej {
   1381   1.55   thorpej 
   1382  1.105        ad 	simple_lock_only_held(NULL, "switching");
   1383   1.55   thorpej }
   1384   1.55   thorpej 
   1385   1.93       erh /*
   1386   1.93       erh  * Drop into the debugger if lp isn't the only lock held.
   1387   1.93       erh  * lp may be NULL.
   1388   1.93       erh  */
   1389   1.55   thorpej void
   1390   1.55   thorpej simple_lock_only_held(volatile struct simplelock *lp, const char *where)
   1391   1.55   thorpej {
   1392   1.91     perry 	volatile struct simplelock *alp;
   1393   1.88     blymn 	cpuid_t cpu_num = cpu_number();
   1394   1.34   thorpej 	int s;
   1395   1.34   thorpej 
   1396   1.55   thorpej 	if (lp) {
   1397   1.55   thorpej 		LOCK_ASSERT(simple_lock_held(lp));
   1398   1.55   thorpej 	}
   1399  1.114        ad 	s = splhigh();
   1400   1.34   thorpej 	SLOCK_LIST_LOCK();
   1401   1.58       chs 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1402   1.55   thorpej 		if (alp == lp)
   1403   1.42   thorpej 			continue;
   1404   1.88     blymn 		if (alp->lock_holder == cpu_num)
   1405   1.55   thorpej 			break;
   1406   1.12       chs 	}
   1407   1.21   thorpej 	SLOCK_LIST_UNLOCK();
   1408   1.12       chs 	splx(s);
   1409   1.55   thorpej 
   1410   1.55   thorpej 	if (alp != NULL) {
   1411   1.58       chs 		lock_printf("\n%s with held simple_lock %p "
   1412   1.55   thorpej 		    "CPU %lu %s:%d\n",
   1413   1.55   thorpej 		    where, alp, alp->lock_holder, alp->lock_file,
   1414   1.55   thorpej 		    alp->lock_line);
   1415   1.58       chs 		SLOCK_TRACE();
   1416   1.55   thorpej 		SLOCK_DEBUGGER();
   1417   1.55   thorpej 	}
   1418    1.1      fvdl }
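
/*
 * Example (sketch): a subsystem about to block while permitted to hold
 * only its own lock can verify that invariant.  "foo_slock" and the
 * message string are hypothetical.
 */
#if 0
	simple_lock_only_held(&foo_slock, "foo_sleep");
#endif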
   1419   1.94       erh 
   1420   1.94       erh /*
   1421   1.94       erh  * Set to 1 by simple_lock_assert_*().
   1422   1.94       erh  * Can be cleared from ddb to avoid a panic.
   1423   1.94       erh  */
   1424   1.94       erh int slock_assert_will_panic;
   1425   1.94       erh 
   1426   1.94       erh /*
   1427   1.94       erh  * If the lock isn't held, print a traceback, optionally drop into the
   1428   1.94       erh  *  debugger, then panic.
    1429   1.94       erh  * The panic can be avoided by clearing slock_assert_will_panic from the
   1430   1.94       erh  *  debugger.
   1431   1.94       erh  */
   1432   1.94       erh void
   1433   1.94       erh _simple_lock_assert_locked(volatile struct simplelock *alp,
   1434   1.94       erh     const char *lockname, const char *id, int l)
   1435   1.94       erh {
   1436   1.94       erh 	if (simple_lock_held(alp) == 0) {
   1437   1.94       erh 		slock_assert_will_panic = 1;
   1438   1.94       erh 		lock_printf("%s lock not held\n", lockname);
   1439   1.94       erh 		SLOCK_WHERE("lock not held", alp, id, l);
   1440  1.117        ad 		if (slock_assert_will_panic && panicstr == NULL)
   1441   1.94       erh 			panic("%s: not locked", lockname);
   1442   1.94       erh 	}
   1443   1.94       erh }
   1444   1.94       erh 
   1445   1.94       erh void
   1446   1.94       erh _simple_lock_assert_unlocked(volatile struct simplelock *alp,
   1447   1.94       erh     const char *lockname, const char *id, int l)
   1448   1.94       erh {
   1449   1.94       erh 	if (simple_lock_held(alp)) {
   1450   1.94       erh 		slock_assert_will_panic = 1;
   1451   1.94       erh 		lock_printf("%s lock held\n", lockname);
   1452   1.94       erh 		SLOCK_WHERE("lock held", alp, id, l);
   1453  1.117        ad 		if (slock_assert_will_panic && panicstr == NULL)
   1454   1.94       erh 			panic("%s: locked", lockname);
   1455   1.94       erh 	}
   1456   1.94       erh }
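
/*
 * Example (sketch): how a caller might reach these checks directly;
 * consumers normally go through wrapper macros that supply __FILE__
 * and __LINE__.  "foo_slock" is hypothetical.
 */
#if 0
	_simple_lock_assert_locked(&foo_slock, "foo_slock",
	    __FILE__, __LINE__);
#endif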
   1457   1.94       erh 
   1458   1.96      yamt void
   1459   1.96      yamt assert_sleepable(struct simplelock *interlock, const char *msg)
   1460   1.96      yamt {
   1461   1.96      yamt 
   1462  1.117        ad 	if (panicstr != NULL)
   1463  1.117        ad 		return;
   1464  1.113      yamt 	if (CURCPU_IDLE_P()) {
   1465  1.113      yamt 		panic("assert_sleepable: idle");
   1466   1.97      yamt 	}
   1467   1.96      yamt 	simple_lock_only_held(interlock, msg);
   1468   1.96      yamt }
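
/*
 * Example (sketch): code on the verge of a potentially sleeping
 * operation, e.g. a blocking allocation, can verify its context:
 */
#if 0
	assert_sleepable(NULL, "malloc");
#endif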
   1469   1.96      yamt 
   1470   1.21   thorpej #endif /* LOCKDEBUG */ /* } */
   1471   1.62   thorpej 
   1472  1.116        ad int kernel_lock_id;
   1473  1.116        ad __cpu_simple_lock_t kernel_lock;
   1474  1.116        ad 
   1475   1.62   thorpej #if defined(MULTIPROCESSOR)
   1476  1.105        ad 
   1477   1.62   thorpej /*
   1478   1.62   thorpej  * Functions for manipulating the kernel_lock.  We put them here
   1479   1.62   thorpej  * so that they show up in profiles.
   1480   1.62   thorpej  */
   1481   1.62   thorpej 
   1482  1.105        ad #define	_KERNEL_LOCK_ABORT(msg)						\
   1483  1.105        ad     LOCKDEBUG_ABORT(kernel_lock_id, &kernel_lock, &_kernel_lock_ops,	\
   1484  1.105        ad         __FUNCTION__, msg)
   1485  1.105        ad 
   1486  1.105        ad #ifdef LOCKDEBUG
   1487  1.105        ad #define	_KERNEL_LOCK_ASSERT(cond)					\
   1488  1.105        ad do {									\
   1489  1.105        ad 	if (!(cond))							\
   1490  1.105        ad 		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
   1491  1.105        ad } while (/* CONSTCOND */ 0)
   1492  1.105        ad #else
   1493  1.105        ad #define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
   1494  1.105        ad #endif
   1495  1.105        ad 
   1496  1.105        ad void	_kernel_lock_dump(volatile void *);
   1497  1.105        ad 
   1498  1.105        ad lockops_t _kernel_lock_ops = {
   1499  1.105        ad 	"Kernel lock",
   1500  1.105        ad 	0,
   1501  1.105        ad 	_kernel_lock_dump
   1502  1.105        ad };
   1503  1.105        ad 
   1504   1.85      yamt /*
   1505  1.105        ad  * Initialize the kernel lock.
   1506   1.85      yamt  */
   1507   1.62   thorpej void
   1508   1.62   thorpej _kernel_lock_init(void)
   1509   1.62   thorpej {
   1510   1.62   thorpej 
   1511  1.105        ad 	__cpu_simple_lock_init(&kernel_lock);
   1512  1.105        ad 	kernel_lock_id = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops);
   1513   1.62   thorpej }
   1514   1.62   thorpej 
   1515   1.62   thorpej /*
   1516  1.105        ad  * Print debugging information about the kernel lock.
   1517   1.62   thorpej  */
   1518   1.62   thorpej void
   1519  1.105        ad _kernel_lock_dump(volatile void *junk)
   1520   1.62   thorpej {
   1521   1.85      yamt 	struct cpu_info *ci = curcpu();
   1522   1.62   thorpej 
   1523  1.105        ad 	(void)junk;
   1524   1.85      yamt 
    1525  1.105        ad 	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
    1526  1.105        ad 	    ci->ci_biglock_count, (unsigned long)ci->ci_biglock_wanted);
   1527   1.62   thorpej }
   1528   1.62   thorpej 
   1529  1.105        ad /*
   1530  1.105        ad  * Acquire 'nlocks' holds on the kernel lock.  If 'l' is non-null, the
   1531  1.105        ad  * acquisition is from process context.
   1532  1.105        ad  */
   1533   1.62   thorpej void
   1534  1.105        ad _kernel_lock(int nlocks, struct lwp *l)
   1535   1.62   thorpej {
   1536   1.85      yamt 	struct cpu_info *ci = curcpu();
   1537  1.105        ad 	LOCKSTAT_TIMER(spintime);
   1538  1.105        ad 	LOCKSTAT_FLAG(lsflag);
   1539  1.105        ad 	struct lwp *owant;
   1540  1.105        ad #ifdef LOCKDEBUG
   1541  1.105        ad 	u_int spins;
   1542  1.105        ad #endif
   1543   1.85      yamt 	int s;
   1544   1.85      yamt 
   1545  1.105        ad 	(void)l;
   1546  1.105        ad 
   1547  1.105        ad 	if (nlocks == 0)
   1548  1.105        ad 		return;
   1549  1.105        ad 	_KERNEL_LOCK_ASSERT(nlocks > 0);
   1550   1.62   thorpej 
   1551  1.115        ad 	s = splsched();	/* XXX splvm() */
   1552  1.105        ad 
   1553  1.105        ad 	if (ci->ci_biglock_count != 0) {
   1554  1.105        ad 		_KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
   1555  1.105        ad 		ci->ci_biglock_count += nlocks;
   1556  1.105        ad 		splx(s);
   1557  1.105        ad 		return;
   1558  1.105        ad 	}
   1559  1.105        ad 
   1560  1.107        ad 	LOCKDEBUG_WANTLOCK(kernel_lock_id,
   1561  1.107        ad 	    (uintptr_t)__builtin_return_address(0), 0);
   1562  1.107        ad 
   1563  1.105        ad 	if (__cpu_simple_lock_try(&kernel_lock)) {
   1564  1.105        ad 		ci->ci_biglock_count = nlocks;
   1565  1.105        ad 		LOCKDEBUG_LOCKED(kernel_lock_id,
   1566  1.105        ad 		    (uintptr_t)__builtin_return_address(0), 0);
   1567  1.105        ad 		splx(s);
   1568  1.105        ad 		return;
   1569  1.105        ad 	}
   1570  1.105        ad 
   1571  1.105        ad 	LOCKSTAT_ENTER(lsflag);
   1572  1.105        ad 	LOCKSTAT_START_TIMER(lsflag, spintime);
   1573  1.105        ad 
   1574  1.105        ad 	/*
   1575  1.105        ad 	 * Before setting ci_biglock_wanted we must post a store
   1576  1.105        ad 	 * fence (see kern_mutex.c).  This is accomplished by the
   1577  1.105        ad 	 * __cpu_simple_lock_try() above.
   1578  1.105        ad 	 */
   1579  1.105        ad 	owant = ci->ci_biglock_wanted;
   1580  1.105        ad 	ci->ci_biglock_wanted = curlwp;	/* XXXAD */
   1581  1.105        ad 
   1582  1.105        ad #ifdef LOCKDEBUG
   1583  1.105        ad 	spins = 0;
   1584  1.105        ad #endif
   1585  1.105        ad 
   1586  1.105        ad 	do {
   1587  1.105        ad 		while (kernel_lock == __SIMPLELOCK_LOCKED) {
   1588  1.105        ad #ifdef LOCKDEBUG
   1589  1.105        ad 			if (SPINLOCK_SPINOUT(spins))
   1590  1.105        ad 				_KERNEL_LOCK_ABORT("spinout");
   1591  1.105        ad #endif
   1592  1.105        ad 			splx(s);
   1593  1.105        ad 			SPINLOCK_SPIN_HOOK;
   1594  1.115        ad 			(void)splsched();	/* XXX splvm() */
   1595  1.105        ad 		}
   1596  1.105        ad 	} while (!__cpu_simple_lock_try(&kernel_lock));
   1597  1.105        ad 
   1598  1.105        ad 	ci->ci_biglock_wanted = owant;
   1599  1.105        ad 	ci->ci_biglock_count += nlocks;
   1600  1.107        ad 	LOCKSTAT_STOP_TIMER(lsflag, spintime);
   1601  1.107        ad 	LOCKDEBUG_LOCKED(kernel_lock_id,
   1602  1.107        ad 	    (uintptr_t)__builtin_return_address(0), 0);
   1603   1.85      yamt 	splx(s);
   1604  1.105        ad 
   1605  1.105        ad 	/*
   1606  1.105        ad 	 * Again, another store fence is required (see kern_mutex.c).
   1607  1.105        ad 	 */
   1608  1.105        ad 	mb_write();
   1609  1.107        ad 	if (owant == NULL) {
   1610  1.107        ad 		LOCKSTAT_EVENT(lsflag, &kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
   1611  1.107        ad 		    1, spintime);
   1612  1.107        ad 	}
   1613  1.105        ad 	LOCKSTAT_EXIT(lsflag);
   1614   1.62   thorpej }
   1615   1.62   thorpej 
   1616   1.62   thorpej /*
   1617  1.105        ad  * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
   1618  1.105        ad  * all holds.  If 'l' is non-null, the release is from process context.
   1619   1.62   thorpej  */
   1620   1.62   thorpej void
   1621  1.105        ad _kernel_unlock(int nlocks, struct lwp *l, int *countp)
   1622   1.62   thorpej {
   1623  1.105        ad 	struct cpu_info *ci = curcpu();
   1624  1.105        ad 	u_int olocks;
   1625  1.105        ad 	int s;
   1626   1.62   thorpej 
   1627  1.105        ad 	(void)l;
   1628   1.62   thorpej 
   1629  1.105        ad 	_KERNEL_LOCK_ASSERT(nlocks < 2);
   1630   1.62   thorpej 
   1631  1.105        ad 	olocks = ci->ci_biglock_count;
   1632   1.77      yamt 
   1633  1.105        ad 	if (olocks == 0) {
   1634  1.105        ad 		_KERNEL_LOCK_ASSERT(nlocks <= 0);
   1635  1.105        ad 		if (countp != NULL)
   1636  1.105        ad 			*countp = 0;
   1637  1.105        ad 		return;
   1638  1.105        ad 	}
   1639   1.77      yamt 
   1640  1.105        ad 	_KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
   1641   1.85      yamt 
   1642  1.105        ad 	if (nlocks == 0)
   1643  1.105        ad 		nlocks = olocks;
   1644  1.105        ad 	else if (nlocks == -1) {
   1645  1.105        ad 		nlocks = 1;
   1646  1.105        ad 		_KERNEL_LOCK_ASSERT(olocks == 1);
   1647  1.105        ad 	}
   1648   1.85      yamt 
   1649  1.115        ad 	s = splsched();	/* XXX splvm() */
   1650  1.105        ad 	if ((ci->ci_biglock_count -= nlocks) == 0) {
   1651  1.105        ad 		LOCKDEBUG_UNLOCKED(kernel_lock_id,
   1652  1.105        ad 		    (uintptr_t)__builtin_return_address(0), 0);
   1653  1.105        ad 		__cpu_simple_unlock(&kernel_lock);
   1654   1.85      yamt 	}
   1655  1.105        ad 	splx(s);
   1656   1.77      yamt 
   1657  1.105        ad 	if (countp != NULL)
   1658  1.105        ad 		*countp = olocks;
   1659   1.77      yamt }
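
/*
 * Example (sketch): dropping every hold on the kernel lock before a
 * long sleep and restoring them afterwards, using the count returned
 * through 'countp'.
 */
#if 0
	int holds;

	_kernel_unlock(0, l, &holds);	/* release all holds */
	/* ... sleep ... */
	_kernel_lock(holds, l);		/* take the same number back */
#endif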
   1660   1.77      yamt 
   1661   1.84      yamt #if defined(DEBUG)
   1662  1.105        ad /*
   1663  1.105        ad  * Assert that the kernel lock is held.
   1664  1.105        ad  */
   1665   1.84      yamt void
   1666  1.105        ad _kernel_lock_assert_locked(void)
   1667   1.84      yamt {
   1668  1.100      yamt 
   1669  1.105        ad 	if (kernel_lock != __SIMPLELOCK_LOCKED ||
   1670  1.105        ad 	    curcpu()->ci_biglock_count == 0)
   1671  1.105        ad 		_KERNEL_LOCK_ABORT("not locked");
   1672   1.84      yamt }
   1673  1.100      yamt 
   1674  1.100      yamt void
    1675  1.100      yamt _kernel_lock_assert_unlocked(void)
   1676  1.100      yamt {
   1677  1.100      yamt 
   1678  1.105        ad 	if (curcpu()->ci_biglock_count != 0)
   1679  1.105        ad 		_KERNEL_LOCK_ABORT("locked");
   1680  1.100      yamt }
   1681   1.84      yamt #endif
   1682   1.94       erh 
    1683  1.105        ad #endif	/* MULTIPROCESSOR */
   1684