/*	$NetBSD: kern_lock.c,v 1.178 2022/08/20 23:37:12 riastradh Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.178 2022/08/20 23:37:12 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_lockdebug.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/pserialize.h>

#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
#include <sys/ksyms.h>
#endif

#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;

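/*
 * The kernel lock occupies a cache line of its own: the array is sized
 * to fill CACHE_LINE_SIZE and aligned on a cache line boundary, so the
 * lock word does not share a line with other frequently written data
 * (false sharing).
 */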
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;

void
assert_sleepable(void)
{
	const char *reason;
	uint64_t pctr;
	bool idle;

	if (panicstr != NULL) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	/*
	 * Avoid disabling/re-enabling preemption here since this
	 * routine may be called in delicate situations.
	 */
	do {
		pctr = lwp_pctr();
		__insn_barrier();
		idle = CURCPU_IDLE_P();
		__insn_barrier();
	} while (pctr != lwp_pctr());

	reason = NULL;
	if (idle && !cold) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if (cpu_softintr_p()) {
		reason = "softint";
	}
	if (!pserialize_not_in_read_section()) {
		reason = "pserialize";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
}
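
/*
 * Illustrative only (not part of this file): callers normally reach
 * assert_sleepable() through the ASSERT_SLEEPABLE() macro from
 * <sys/systm.h>, placed at the head of a path that may block, e.g.
 * before a KM_SLEEP allocation:
 *
 *	ASSERT_SLEEPABLE();
 *	p = kmem_alloc(sz, KM_SLEEP);
 */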

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

static void	_kernel_lock_dump(const volatile void *, lockop_printer_t);

lockops_t _kernel_lock_ops = {
	.lo_name = "Kernel lock",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = _kernel_lock_dump,
};

#ifdef LOCKDEBUG

#include <ddb/ddb.h>

static void
kernel_lock_trace_ipi(void *cookie)
{

	printf("%s[%d %s]: hogging kernel lock\n", cpu_name(curcpu()),
	    curlwp->l_lid,
	    curlwp->l_name ? curlwp->l_name : curproc->p_comm);
	db_stacktrace();
}

#endif

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
static void
_kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	pr("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 *
 * Although it may not look it, this is one of the most central, intricate
 * routines in the kernel, and tons of code elsewhere depends on its exact
 * behaviour.  If you change something in here, expect it to bite you in
 * the rear.
 */
void
_kernel_lock(int nlocks)
{
	struct cpu_info *ci;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
#ifdef LOCKDEBUG
	static struct cpu_info *kernel_lock_holder;
	u_int spins = 0;
#endif
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splvm();
	ci = curcpu();
	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		splx(s);
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
#ifdef LOCKDEBUG
		kernel_lock_holder = curcpu();
#endif
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = l;
#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
	l->l_ld_wanted = __builtin_return_address(0);
#endif

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
#ifdef LOCKDEBUG
			extern int start_init_exec;
			if (SPINLOCK_SPINOUT(spins) && start_init_exec) {
				ipi_msg_t msg = {
					.func = kernel_lock_trace_ipi,
				};
				kpreempt_disable();
				ipi_unicast(&msg, kernel_lock_holder);
				ipi_wait(&msg);
				kpreempt_enable();
				_KERNEL_LOCK_ABORT("spinout");
			}
#endif
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
	    RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

#ifdef LOCKDEBUG
	kernel_lock_holder = curcpu();
#endif
}
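
/*
 * Illustrative only (not part of this file): kernel code normally takes
 * and drops the big lock through the KERNEL_LOCK() and
 * KERNEL_UNLOCK_ONE() macros from <sys/systm.h> rather than calling
 * _kernel_lock()/_kernel_unlock() directly, e.g. to bracket a
 * non-MPSAFE driver entry point (do_legacy_ioctl is a placeholder):
 *
 *	KERNEL_LOCK(1, NULL);
 *	error = do_legacy_ioctl(dev, cmd, data, flag, l);
 *	KERNEL_UNLOCK_ONE(NULL);
 */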

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
 * all holds.
 */
void
_kernel_unlock(int nlocks, int *countp)
{
	struct cpu_info *ci;
	u_int olocks;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}
	s = splvm();
	ci = curcpu();
	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
	if (ci->ci_biglock_count == nlocks) {
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		l->l_blcnt -= nlocks;
		splx(s);
		if (l->l_dopreempt)
			kpreempt(0);
	} else {
		ci->ci_biglock_count -= nlocks;
		l->l_blcnt -= nlocks;
		splx(s);
	}

	if (countp != NULL)
		*countp = olocks;
}
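
/*
 * Illustrative only (not part of this file): passing nlocks == 0 with a
 * count pointer supports the save/restore pattern used when a thread
 * must drop every hold across a sleep and reacquire the same number
 * afterwards, via the <sys/systm.h> wrappers:
 *
 *	int biglocks;
 *
 *	KERNEL_UNLOCK_ALL(NULL, &biglocks);	(drop all holds, save count)
 *	(void)kpause("example", false, hz, NULL);
 *	KERNEL_LOCK(biglocks, NULL);		(reacquire the saved count)
 */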

bool
_kernel_locked_p(void)
{
	return __SIMPLELOCK_LOCKED_P(kernel_lock);
}

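/*
 * Illustrative only (not part of this file): callers typically test
 * this through the KERNEL_LOCKED_P() wrapper from <sys/systm.h>, for
 * example to assert that the big lock is held before touching
 * non-MPSAFE state:
 *
 *	KASSERT(KERNEL_LOCKED_P());
 */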