/*	$NetBSD: kern_lock.c,v 1.134 2008/01/30 14:54:26 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.134 2008/01/30 14:54:26 ad Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lockdebug.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>

#include <machine/stdarg.h>
#include <machine/lock.h>

#include <dev/lockstat.h>

#define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)

bool	kernel_lock_dodebug;
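
/*
 * The big lock itself.  The lock word is placed in an array padded
 * out to a full cache line and aligned on a cache line boundary, so
 * that CPUs spinning on it do not share a line with unrelated data.
 */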
__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
    __aligned(CACHE_LINE_SIZE);

#if defined(LOCKDEBUG)
void
assert_sleepable(struct simplelock *interlock, const char *msg)
{

	if (panicstr != NULL)
		return;
	LOCKDEBUG_BARRIER(kernel_lock, 1);
	if (CURCPU_IDLE_P() && !cold) {
		panic("assert_sleepable: idle");
	}
}
#endif

/*
 * rump doesn't need the kernel lock, so force it out.  We cannot
 * currently easily include it for compilation because of
 * a) SPINLOCK_* and b) membar_producer().  They are defined in
 * different places / ways for each arch, so simply do not bother
 * to fight a lot for no gain (i.e. pain but still no gain).
 */
#ifndef _RUMPKERNEL
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
    LOCKDEBUG_ABORT(kernel_lock, &_kernel_lock_ops, __func__, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

void	_kernel_lock_dump(volatile void *);

lockops_t _kernel_lock_ops = {
	"Kernel lock",
	0,
	_kernel_lock_dump
};

/*
 * Initialize the kernel lock.
 */
void
kernel_lock_init(void)
{

	KASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));
	__cpu_simple_lock_init(kernel_lock);
	kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
	    RETURN_ADDRESS);
}

/*
 * Print debugging information about the kernel lock.
 */
void
_kernel_lock_dump(volatile void *junk)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock.  The 'l' argument is
 * currently unused; holds are always recorded against curlwp.
 */
void
_kernel_lock(int nlocks, struct lwp *l)
{
	struct cpu_info *ci = curcpu();
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
	u_int spins;
	int s;

	if (nlocks == 0)
		return;
	_KERNEL_LOCK_ASSERT(nlocks > 0);

	l = curlwp;

	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
		ci->ci_biglock_count += nlocks;
		l->l_blcnt += nlocks;
		return;
	}

	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
	    0);

	s = splvm();
	if (__cpu_simple_lock_try(kernel_lock)) {
		ci->ci_biglock_count = nlocks;
		l->l_blcnt = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		splx(s);
		return;
	}

	/*
	 * To remove the ordering constraint between adaptive mutexes
	 * and kernel_lock we must make it appear as if this thread is
	 * blocking.  For non-interlocked mutex release, a store fence
	 * is required to ensure that the result of any mutex_exit()
	 * by the current LWP becomes visible on the bus before the set
	 * of ci->ci_biglock_wanted becomes visible.
	 */
	membar_producer();
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = l;

	/*
	 * Spin until we acquire the lock.  Once we have it, record the
	 * time spent with lockstat.
	 */
	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	spins = 0;
	do {
		splx(s);
		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
			if (SPINLOCK_SPINOUT(spins)) {
				_KERNEL_LOCK_ABORT("spinout");
			}
			SPINLOCK_BACKOFF_HOOK;
			SPINLOCK_SPIN_HOOK;
		}
		s = splvm();
	} while (!__cpu_simple_lock_try(kernel_lock));

	ci->ci_biglock_count = nlocks;
	l->l_blcnt = nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS, 0);
	if (owant == NULL) {
		LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
		    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
	}
	LOCKSTAT_EXIT(lsflag);
	splx(s);

	/*
	 * Now that we have kernel_lock, reset ci_biglock_wanted.  This
	 * store must be unbuffered (immediately visible on the bus) in
	 * order for non-interlocked mutex release to work correctly.
	 * It must be visible before a mutex_exit() can execute on this
	 * processor.
	 *
	 * Note: only where CAS is available in hardware will this be
	 * an unbuffered write, but non-interlocked release cannot be
	 * done on CPUs without CAS in hardware.
	 */
	(void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);

	/*
	 * Issue a memory barrier as we have acquired a lock.  This also
	 * prevents stores from a following mutex_exit() being reordered
	 * to occur before our store to ci_biglock_wanted above.
	 */
	membar_enter();
}
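
/*
 * Illustrative sketch (not part of the original sources, kept under
 * "#if 0" so it is never compiled): how a caller typically takes a
 * single hold on the kernel lock around MP-unsafe work and releases
 * it again.  In practice this is normally done through the
 * KERNEL_LOCK()/KERNEL_UNLOCK_ONE() wrapper macros rather than by
 * calling _kernel_lock()/_kernel_unlock() directly; the function
 * name below is hypothetical.
 */
#if 0
static void
example_one_hold(void)
{

	/*
	 * Take one hold.  If this CPU already holds the lock, the
	 * per-CPU and per-LWP counts are simply bumped; otherwise we
	 * spin until the lock is ours.
	 */
	_kernel_lock(1, curlwp);

	/* ... MP-unsafe work protected by the big lock ... */

	/* Drop exactly one hold. */
	_kernel_unlock(1, curlwp, NULL);
}
#endif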

/*
 * Release 'nlocks' holds on the kernel lock: 1 releases a single hold,
 * 0 releases all holds, and -1 releases a single hold that must be the
 * last one.  If 'countp' is non-NULL, the number of holds that were
 * held on entry is stored there.  The 'l' argument is currently
 * unused; holds are always accounted against curlwp.
 */
void
_kernel_unlock(int nlocks, struct lwp *l, int *countp)
{
	struct cpu_info *ci = curcpu();
	u_int olocks;
	int s;

	l = curlwp;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = l->l_blcnt;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}

	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);

	l->l_blcnt -= nlocks;
	if (ci->ci_biglock_count == nlocks) {
		s = splvm();
		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
		    RETURN_ADDRESS, 0);
		ci->ci_biglock_count = 0;
		__cpu_simple_unlock(kernel_lock);
		splx(s);
	} else
		ci->ci_biglock_count -= nlocks;

	if (countp != NULL)
		*countp = olocks;
}
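
/*
 * Illustrative sketch (not part of the original sources, kept under
 * "#if 0" so it is never compiled): the release-all / reacquire
 * pattern that the 'countp' out-parameter supports.  A caller that
 * may hold the kernel lock recursively gives up every hold before
 * blocking and restores the same number of holds afterwards.  The
 * function name below is hypothetical.
 */
#if 0
static void
example_drop_and_reacquire(void)
{
	int biglocks;

	/* Release all holds owned by curlwp, remembering how many. */
	_kernel_unlock(0, curlwp, &biglocks);

	/* ... sleep or block without holding the kernel lock ... */

	/* Reacquire exactly the holds given up (a no-op if zero). */
	_kernel_lock(biglocks, curlwp);
}
#endif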
#endif	/* !_RUMPKERNEL */