/*	$NetBSD: kern_lock.c,v 1.171 2020/05/02 09:13:40 martin Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.171 2020/05/02 09:13:40 martin Exp $");

#ifdef _KERNEL_OPT
#include "opt_lockdebug.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/pserialize.h>

#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
#include <sys/ksyms.h>
#endif

#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;

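/*
 * The lock itself: sized and aligned so that it occupies a cache line of
 * its own, which avoids false sharing with unrelated data.  Only the first
 * element of the array is ever used as a lock; the rest is padding (the
 * CTASSERT() following kernel_lock_init() checks the sizing assumption).
 */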
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __cacheline_aligned;

void
assert_sleepable(void)
{
	const char *reason;
	uint64_t pctr;
	bool idle;

	if (panicstr != NULL) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	/*
	 * Avoid disabling/re-enabling preemption here since this
	 * routine may be called in delicate situations.  Instead,
	 * sample lwp_pctr() around the idle check below: if the count
	 * changed we may have been preempted and migrated to another
	 * CPU, so the check is retried against the new curcpu().
	 */
	do {
		pctr = lwp_pctr();
		__insn_barrier();
		idle = CURCPU_IDLE_P();
		__insn_barrier();
	} while (pctr != lwp_pctr());

	reason = NULL;
	if (idle && !cold &&
	    kcpuset_isset(kcpuset_running, cpu_index(curcpu()))) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if (cpu_softintr_p()) {
		reason = "softint";
	}
	if (!pserialize_not_in_read_section()) {
		reason = "pserialize";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
}
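
/*
 * Usage sketch (illustrative only, not part of this file): code that may
 * block typically asserts sleepability on entry, so that DIAGNOSTIC or
 * LOCKDEBUG kernels catch calls made from interrupt, soft interrupt, idle
 * LWP or pserialize read-side context.  The helper name and the KM_SLEEP
 * allocation below are assumed for the example:
 *
 *	void *
 *	example_alloc_waitok(size_t len)
 *	{
 *
 *		assert_sleepable();		// may sleep below
 *		return kmem_alloc(len, KM_SLEEP);
 *	}
 */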

/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

static void	_kernel_lock_dump(const volatile void *, lockop_printer_t);

lockops_t _kernel_lock_ops = {
	.lo_name = "Kernel lock",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = _kernel_lock_dump,
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));

/*
 * Print debugging information about the kernel lock.
 */
static void
_kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	pr("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.
 *
 * Although it may not look it, this is one of the most central, intricate
 * routines in the kernel, and tons of code elsewhere depends on its exact
 * behaviour.  If you change something in here, expect it to bite you in
 * the rear.
 */
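/*
 * A usage sketch: callers normally do not use _kernel_lock() directly but
 * go through the wrapper macros in <sys/lock.h>.  The driver function
 * below is hypothetical; the macros show the usual pattern for running an
 * MP-unsafe code path under the big lock:
 *
 *	int
 *	exampledrv_ioctl(dev_t dev, u_long cmd, void *data, int flag,
 *	    struct lwp *l)
 *	{
 *		int error;
 *
 *		KERNEL_LOCK(1, NULL);		// take one hold
 *		error = exampledrv_do_ioctl(dev, cmd, data, flag, l);
 *		KERNEL_UNLOCK_ONE(NULL);	// drop that hold
 *		return error;
 *	}
 */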
void
_kernel_lock(int nlocks)
{
	struct cpu_info *ci;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splvm();
	ci = curcpu();
	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		splx(s);
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 *
	 * However, we won't set ci_biglock_wanted until we've spun for
	 * a bit, as we don't want to make any lock waiters in rw_oncpu()
	 * or mutex_oncpu() block prematurely.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = l;
#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
	l->l_ld_wanted = __builtin_return_address(0);
#endif

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins)) {
				extern int start_init_exec;
				if (start_init_exec)
					_KERNEL_LOCK_ABORT("spinout");
			}
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
#endif
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
	    RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
 * all holds.
 */
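/*
 * Sketch of the 'nlocks' convention as driven by the KERNEL_UNLOCK_*
 * wrappers in <sys/lock.h> (the expansions shown are approximate; the
 * macro definitions themselves are authoritative):
 *
 *	KERNEL_UNLOCK_ONE(l)	-> _kernel_unlock(1, NULL);  release one hold
 *	KERNEL_UNLOCK_ALL(l, p)	-> _kernel_unlock(0, p);     release all holds,
 *							     old count via *p
 *	KERNEL_UNLOCK_LAST(l)	-> _kernel_unlock(-1, NULL); release what must
 *							     be the only hold
 */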
void
_kernel_unlock(int nlocks, int *countp)
{
	struct cpu_info *ci;
	u_int olocks;
	int s;
	struct lwp *l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}
	s = splvm();
	ci = curcpu();
	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
	if (ci->ci_biglock_count == nlocks) {
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		l->l_blcnt -= nlocks;
		splx(s);
		if (l->l_dopreempt)
			kpreempt(0);
	} else {
		ci->ci_biglock_count -= nlocks;
		l->l_blcnt -= nlocks;
		splx(s);
	}

	if (countp != NULL)
		*countp = olocks;
}

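/*
 * Report whether the big lock is held.  This checks only that the lock is
 * taken, not that the current LWP is the holder, so it is chiefly useful
 * in assertions, e.g. (via the KERNEL_LOCKED_P() wrapper in <sys/lock.h>;
 * the surrounding entry point is hypothetical):
 *
 *	KASSERT(KERNEL_LOCKED_P());	// in an MP-unsafe entry point
 */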
bool
_kernel_locked_p(void)
{
	return __SIMPLELOCK_LOCKED_P(kernel_lock);
}