/*	$NetBSD: kern_lock.c,v 1.166 2020/01/22 13:19:33 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.166 2020/01/22 13:19:33 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/pserialize.h>

#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;

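/*
 * kernel_lock is dimensioned to fill a whole cache line so that it
 * does not share its line with unrelated data (avoiding false sharing
 * between CPUs).
 */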
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;

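/*
 * Panic unless the current context is allowed to sleep: the idle LWP,
 * interrupt and soft interrupt context, and pserialize read sections
 * are all forbidden from sleeping.
 */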
void
assert_sleepable(void)
{
	const char *reason;
	uint64_t pctr;
	bool idle;

	if (panicstr != NULL) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	/*
	 * Avoid disabling/re-enabling preemption here since this
	 * routine may be called in delicate situations.  Instead,
	 * retry until lwp_pctr(), which changes on every context
	 * switch, reads the same value before and after the idle
	 * check, proving the check ran without interruption.
	 */
	do {
		pctr = lwp_pctr();
		__insn_barrier();
		idle = CURCPU_IDLE_P();
		__insn_barrier();
	} while (pctr != lwp_pctr());

	reason = NULL;
	if (idle && !cold &&
	    kcpuset_isset(kcpuset_running, cpu_index(curcpu()))) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if (cpu_softintr_p()) {
		reason = "softint";
	}
	if (!pserialize_not_in_read_section()) {
		reason = "pserialize";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
}

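/*
 * Example (a hypothetical sketch, not code from this file): any routine
 * that is about to block would typically assert first, e.g.
 *
 *	assert_sleepable();
 *	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
 *
 * since kmem_zalloc(9) with KM_SLEEP may sleep waiting for memory and
 * so must not be used where sleeping is forbidden.
 */
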
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

static void	_kernel_lock_dump(const volatile void *, lockop_printer_t);

lockops_t _kernel_lock_ops = {
	.lo_name = "Kernel lock",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = _kernel_lock_dump,
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
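/* Sanity check: the kernel_lock pad array must hold at least one lock. */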
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
static void
_kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	pr("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 */
void
_kernel_lock(int nlocks)
{
	struct cpu_info *ci;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
	u_int count;
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splvm();
	ci = curcpu();
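	/* Recursive acquisition: this CPU already holds it, so just count. */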
	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		splx(s);
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 *
	 * However, we won't set ci_biglock_wanted until we've spun for
	 * a bit, as we don't want to make any lock waiters in rw_oncpu()
	 * or mutex_oncpu() block prematurely.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;

	/*
	 * Stay pinned to the CPU and spin until we acquire the lock.  Once
	 * we have it, record the time spent with lockstat.
	 */
	l->l_nopreempt++;
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	count = SPINLOCK_BACKOFF_MIN;
	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins)) {
				extern int start_init_exec;
				if (!start_init_exec)
					_KERNEL_LOCK_ABORT("spinout");
			}
#endif
			SPINLOCK_BACKOFF(count);
			if (count == SPINLOCK_BACKOFF_MAX) {
				/* Ok, waiting for real. */
				ci->ci_biglock_wanted = l;
			}
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	/*
	 * Got it; now re-enable preemption, although we still cannot
	 * actually be preempted while kernel_lock is held.
	 */
	l->l_nopreempt--;
	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	splx(s);

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
	    RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

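/*
 * Example use via the wrapper macros (a hedged sketch; KERNEL_LOCK()
 * and KERNEL_UNLOCK_ONE() are believed to be defined in sys/lock.h,
 * outside this file).  Code that is not yet MP-safe takes one hold,
 * does its work, then drops the hold:
 *
 *	KERNEL_LOCK(1, NULL);
 *	error = legacy_driver_ioctl(...);	(hypothetical callee)
 *	KERNEL_UNLOCK_ONE(NULL);
 */
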
/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero,
 * release all holds; if -1, release a single hold and assert that it
 * was the only one.
 */
void
_kernel_unlock(int nlocks, int *countp)
{
	struct cpu_info *ci;
	u_int olocks;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}
	s = splvm();
	ci = curcpu();
	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
	if (ci->ci_biglock_count == nlocks) {
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		l->l_blcnt -= nlocks;
		splx(s);
		if (l->l_dopreempt)
			kpreempt(0);
	} else {
		ci->ci_biglock_count -= nlocks;
		l->l_blcnt -= nlocks;
		splx(s);
	}

	if (countp != NULL)
		*countp = olocks;
}

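/*
 * For reference, the sys/lock.h convenience macros are believed to map
 * onto _kernel_unlock() as follows (an assumption; they are defined
 * outside this file):
 *
 *	KERNEL_UNLOCK_ONE(l)	->  _kernel_unlock(1, NULL)
 *	KERNEL_UNLOCK_ALL(l, p)	->  _kernel_unlock(0, p)
 *	KERNEL_UNLOCK_LAST(l)	->  _kernel_unlock(-1, NULL)
 */

/*
 * Return true if the kernel lock is currently held, whether by this
 * CPU or another.
 */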
bool
_kernel_locked_p(void)
{
	return __SIMPLELOCK_LOCKED_P(kernel_lock);
}