/* $NetBSD: kern_lock.c,v 1.172 2020/12/22 01:57:29 ad Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.172 2020/12/22 01:57:29 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_lockdebug.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/pserialize.h>

#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
#include <sys/ksyms.h>
#endif

#include <machine/lock.h>

#include <dev/lockstat.h>

#define RETURN_ADDRESS (uintptr_t)__builtin_return_address(0)

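/*
 * True if LOCKDEBUG is tracking the kernel lock; set once by
 * kernel_lock_init() and not changed afterwards.
 */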
bool kernel_lock_dodebug;

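/*
 * The kernel lock ("big lock").  The array sizing pads the lock out to
 * a full cache line so that it does not share a line with unrelated data.
 */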
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;

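/*
 * Panic if the current context is not allowed to sleep: the idle LWP,
 * hard or soft interrupt context, or a pserialize read section.  The
 * LOCKDEBUG_BARRIER() call additionally catches locks held by the
 * caller that would make sleeping unsafe.
 */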
void
assert_sleepable(void)
{
        const char *reason;
        uint64_t pctr;
        bool idle;

        if (panicstr != NULL) {
                return;
        }

        LOCKDEBUG_BARRIER(kernel_lock, 1);

        /*
         * Avoid disabling/re-enabling preemption here since this
         * routine may be called in delicate situations.  Instead,
         * sample lwp_pctr() before and after the idle check and retry
         * if the LWP was preempted in between, so that the
         * CURCPU_IDLE_P() result is known to be consistent.
         */
        do {
                pctr = lwp_pctr();
                __insn_barrier();
                idle = CURCPU_IDLE_P();
                __insn_barrier();
        } while (pctr != lwp_pctr());

        reason = NULL;
        if (idle && !cold &&
            kcpuset_isset(kcpuset_running, cpu_index(curcpu()))) {
                reason = "idle";
        }
        if (cpu_intr_p()) {
                reason = "interrupt";
        }
        if (cpu_softintr_p()) {
                reason = "softint";
        }
        if (!pserialize_not_in_read_section()) {
                reason = "pserialize";
        }

        if (reason) {
                panic("%s: %s caller=%p", __func__, reason,
                    (void *)RETURN_ADDRESS);
        }
}

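/*
 * Usage sketch (illustrative only; 'buf' and 'size' are placeholders):
 * a routine that may block would typically assert sleepability before
 * performing a blocking operation, for example:
 *
 *	assert_sleepable();
 *	buf = kmem_alloc(size, KM_SLEEP);	// allocation may sleep
 */
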
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define _KERNEL_LOCK_ABORT(msg)                                         \
        LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)

#ifdef LOCKDEBUG
#define _KERNEL_LOCK_ASSERT(cond)                                       \
do {                                                                    \
        if (!(cond))                                                    \
                _KERNEL_LOCK_ABORT("assertion failed: " #cond);         \
} while (/* CONSTCOND */ 0)
#else
#define _KERNEL_LOCK_ASSERT(cond)       /* nothing */
#endif

static void _kernel_lock_dump(const volatile void *, lockop_printer_t);

lockops_t _kernel_lock_ops = {
        .lo_name = "Kernel lock",
        .lo_type = LOCKOPS_SPIN,
        .lo_dump = _kernel_lock_dump,
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

        __cpu_simple_lock_init(kernel_lock);
        kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
            RETURN_ADDRESS);
}
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
static void
_kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
{
        struct cpu_info *ci = curcpu();

        (void)junk;

        pr("curcpu holds : %18d wanted by: %#018lx\n",
            ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 *
 * Although it may not look it, this is one of the most central, intricate
 * routines in the kernel, and tons of code elsewhere depends on its exact
 * behaviour.  If you change something in here, expect it to bite you in the
 * rear.
 */
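/*
 * Note (sketch, for orientation): most code does not call this directly
 * but goes through the KERNEL_LOCK()/KERNEL_UNLOCK_*() wrapper macros,
 * e.g.:
 *
 *	KERNEL_LOCK(1, NULL);		// take one hold of the big lock
 *	...				// code requiring the kernel lock
 *	KERNEL_UNLOCK_ONE(NULL);	// release that hold
 */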
void
_kernel_lock(int nlocks)
{
        struct cpu_info *ci;
        LOCKSTAT_TIMER(spintime);
        LOCKSTAT_FLAG(lsflag);
        struct lwp *owant;
#ifdef LOCKDEBUG
        u_int spins = 0;
#endif
        int s;
        struct lwp *l = curlwp;

        _KERNEL_LOCK_ASSERT(nlocks > 0);

        s = splvm();
        ci = curcpu();
        if (ci->ci_biglock_count != 0) {
                _KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
                ci->ci_biglock_count += nlocks;
                l->l_blcnt += nlocks;
                splx(s);
                return;
        }

        _KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
        LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
            0);

        if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
                ci->ci_biglock_count = nlocks;
                l->l_blcnt = nlocks;
                LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
                    RETURN_ADDRESS, 0);
                splx(s);
                return;
        }

        /*
         * To remove the ordering constraint between adaptive mutexes
         * and kernel_lock we must make it appear as if this thread is
         * blocking.  For non-interlocked mutex release, a store fence
         * is required to ensure that the result of any mutex_exit()
         * by the current LWP becomes visible on the bus before the set
         * of ci->ci_biglock_wanted becomes visible.
         */
        membar_producer();
        owant = ci->ci_biglock_wanted;
        ci->ci_biglock_wanted = l;
#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
        l->l_ld_wanted = __builtin_return_address(0);
#endif

        /*
         * Spin until we acquire the lock.  Once we have it, record the
         * time spent with lockstat.
         */
        LOCKSTAT_ENTER(lsflag);
        LOCKSTAT_START_TIMER(lsflag, spintime);

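        /*
         * Drop back to the previous IPL while spinning so that
         * interrupts are not held off, and raise it again before each
         * attempt to take the lock.
         */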
        do {
                splx(s);
                while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
#ifdef LOCKDEBUG
                        if (SPINLOCK_SPINOUT(spins)) {
                                extern int start_init_exec;
                                if (start_init_exec)
                                        _KERNEL_LOCK_ABORT("spinout");
                        }
                        SPINLOCK_BACKOFF_HOOK;
                        SPINLOCK_SPIN_HOOK;
#endif
                }
                s = splvm();
        } while (!__cpu_simple_lock_try(kernel_lock));

        ci->ci_biglock_count = nlocks;
        l->l_blcnt = nlocks;
        LOCKSTAT_STOP_TIMER(lsflag, spintime);
        LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
            RETURN_ADDRESS, 0);
        if (owant == NULL) {
                LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
                    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
        }
        LOCKSTAT_EXIT(lsflag);
        splx(s);

        /*
         * Now that we have kernel_lock, reset ci_biglock_wanted.  This
         * store must be unbuffered (immediately visible on the bus) in
         * order for non-interlocked mutex release to work correctly.
         * It must be visible before a mutex_exit() can execute on this
         * processor.
         *
         * Note: only where CAS is available in hardware will this be
         * an unbuffered write, but non-interlocked release cannot be
         * done on CPUs without CAS in hardware.
         */
        (void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

        /*
         * Issue a memory barrier as we have acquired a lock.  This also
         * prevents stores from a following mutex_exit() being reordered
         * to occur before our store to ci_biglock_wanted above.
         */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
        membar_enter();
#endif
}

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
 * all holds; if it is -1, assert that exactly one hold is held and release
 * it.  If 'countp' is not NULL, the number of holds previously held by this
 * LWP is stored there.
 */
void
_kernel_unlock(int nlocks, int *countp)
{
        struct cpu_info *ci;
        u_int olocks;
        int s;
        struct lwp *l = curlwp;

        _KERNEL_LOCK_ASSERT(nlocks < 2);

        olocks = l->l_blcnt;

        if (olocks == 0) {
                _KERNEL_LOCK_ASSERT(nlocks <= 0);
                if (countp != NULL)
                        *countp = 0;
                return;
        }

        _KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

        if (nlocks == 0)
                nlocks = olocks;
        else if (nlocks == -1) {
                nlocks = 1;
                _KERNEL_LOCK_ASSERT(olocks == 1);
        }
        s = splvm();
        ci = curcpu();
        _KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
        if (ci->ci_biglock_count == nlocks) {
                LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
                    RETURN_ADDRESS, 0);
                ci->ci_biglock_count = 0;
                __cpu_simple_unlock(kernel_lock);
                l->l_blcnt -= nlocks;
                splx(s);
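                /*
                 * A preemption may have been deferred while the big
                 * lock was held; let it happen now that the last hold
                 * has been released.
                 */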
                if (l->l_dopreempt)
                        kpreempt(0);
        } else {
                ci->ci_biglock_count -= nlocks;
                l->l_blcnt -= nlocks;
                splx(s);
        }

        if (countp != NULL)
                *countp = olocks;
}

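/*
 * Return true if the kernel lock is currently held by some CPU.  This
 * only inspects the lock word; it does not verify that the holder is
 * the calling LWP.
 */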
bool
_kernel_locked_p(void)
{
        return __SIMPLELOCK_LOCKED_P(kernel_lock);
}