/*	$NetBSD: kern_lock.c,v 1.135 2008/03/17 08:27:50 yamt Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.135 2008/03/17 08:27:50 yamt Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>

#include <machine/stdarg.h>
#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;

__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __aligned(CACHE_LINE_SIZE);

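/*
 * Panic if the current context is not allowed to sleep: called by code
 * that is about to sleep, to catch calls from the idle LWP, from hard
 * interrupt context, or from a soft interrupt LWP.  Does nothing once
 * the system has panicked.
 */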
#if defined(DEBUG) || defined(LKM)
void
assert_sleepable(void)
{
#if !defined(_RUMPKERNEL)
	const char *reason;

	if (panicstr != NULL) {
		return;
	}

	LOCKDEBUG_BARRIER(kernel_lock, 1);

	reason = NULL;
	if (CURCPU_IDLE_P() && !cold) {
		reason = "idle";
	}
	if (cpu_intr_p()) {
		reason = "interrupt";
	}
	if ((curlwp->l_pflag & LP_INTR) != 0) {
		reason = "softint";
	}

	if (reason) {
		panic("%s: %s caller=%p", __func__, reason,
		    (void *)RETURN_ADDRESS);
	}
#endif /* !defined(_RUMPKERNEL) */
}
#endif /* defined(DEBUG) || defined(LKM) */

/*
 * rump doesn't need the kernel lock, so force it out.  We cannot
 * currently easily include it for compilation because of
 * a) SPINLOCK_* and b) membar_producer().  They are defined in
 * different places and in different ways for each arch, so simply
 * don't bother fighting a lot for no gain (i.e. pain but still no
 * gain).
 */
#ifndef _RUMPKERNEL
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(kernel_lock, &_kernel_lock_ops, __func__, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

void	_kernel_lock_dump(volatile void *);

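/*
 * Lock operations vector that registers the big lock with LOCKDEBUG,
 * so that it shows up in lock dumps; _kernel_lock_dump() below supplies
 * the per-CPU state printed for it.
 */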
lockops_t _kernel_lock_ops = {
	"Kernel lock",
	0,
	_kernel_lock_dump
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	KASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));
	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}
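
/*
 * Usage sketch, assuming the KERNEL_LOCK()/KERNEL_UNLOCK_ONE() wrapper
 * macros from <sys/lock.h> (they are not defined in this file): a
 * subsystem entry point that needs one hold on the big lock for the
 * duration of a call might do:
 *
 *	KERNEL_LOCK(1, NULL);
 *	... code that relies on the big lock ...
 *	KERNEL_UNLOCK_ONE(NULL);
 */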

/*
 * Print debugging information about the kernel lock.
 */
void
_kernel_lock_dump(volatile void *junk)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.  The 'l' argument is
 * ignored; the current LWP (curlwp) always takes the holds.
 */
void
_kernel_lock(int nlocks, struct lwp *l)
{
	struct cpu_info *ci = curcpu();
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
	u_int spins;
	int s;

	if (nlocks == 0)
		return;
	_KERNEL_LOCK_ASSERT(nlocks > 0);

	l = curlwp;

	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	s = splvm();
	if (__cpu_simple_lock_try(kernel_lock)) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = l;

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	spins = 0;
	do {
		/*
		 * Drop back to the saved IPL while spinning, so that
		 * interrupts are not held off for the whole wait.
		 */
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
			if (SPINLOCK_SPINOUT(spins)) {
				_KERNEL_LOCK_ABORT("spinout");
			}
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
	membar_enter();
}

/*
 * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero,
 * release all holds; if it is -1, release exactly one hold, which must
 * be the last.  The 'l' argument is ignored; the current LWP (curlwp)
 * always gives up the holds.  If 'countp' is non-null, the number of
 * holds previously held is stored through it.
 */
void
_kernel_unlock(int nlocks, struct lwp *l, int *countp)
{
	struct cpu_info *ci = curcpu();
	u_int olocks;
	int s;

	l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}

	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);

	l->l_blcnt -= nlocks;
	if (ci->ci_biglock_count == nlocks) {
		s = splvm();
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		splx(s);
	} else
		ci->ci_biglock_count -= nlocks;

	if (countp != NULL)
		*countp = olocks;
}
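
/*
 * Usage sketch, assuming the KERNEL_UNLOCK_ALL()/KERNEL_LOCK() wrapper
 * macros from <sys/lock.h> (they are not defined in this file): code
 * that must drop every hold before blocking and reacquire the same
 * number afterwards might do (identifiers here are illustrative):
 *
 *	int biglocks;
 *
 *	KERNEL_UNLOCK_ALL(curlwp, &biglocks);
 *	error = tsleep(obj, PRIBIO, "example", 0);
 *	KERNEL_LOCK(biglocks, curlwp);
 */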
#endif	/* !_RUMPKERNEL */