/*	$NetBSD: lockstat.c,v 1.2.6.2 2006/09/14 12:31:26 yamt Exp $	*/

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lock statistics driver, providing kernel support for the lockstat(8)
 * command.
 */
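
/*
 * Illustrative sketch (not part of the driver): a userland consumer such
 * as lockstat(8) would drive this device roughly as follows, using the
 * ioctls handled in lockstat_ioctl() below.  Error handling is omitted
 * and the struct fields come from <dev/lockstat.h>.
 *
 *	int fd, version;
 *	lsenable_t le;
 *	lsdisable_t ld;
 *
 *	fd = open("/dev/lockstat", O_RDONLY);
 *	ioctl(fd, IOC_LOCKSTAT_GVERSION, &version);
 *	memset(&le, 0, sizeof(le));	// le_nbufs == 0 -> LOCKSTAT_DEFBUFS
 *	le.le_mask = ...;		// one LB_* event bit + lock type bits
 *	ioctl(fd, IOC_LOCKSTAT_ENABLE, &le);
 *	sleep(10);			// trace for a while
 *	ioctl(fd, IOC_LOCKSTAT_DISABLE, &ld);
 *	read(fd, buf, ld.ld_size);	// fetch the lsbuf_t array
 *	close(fd);
 */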

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lockstat.c,v 1.2.6.2 2006/09/14 12:31:26 yamt Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/lockstat.h>

#ifndef __HAVE_CPU_COUNTER
#error CPU counters not available
#endif

#if LONG_BIT == 64
#define	LOCKSTAT_HASH_SHIFT	3
#elif LONG_BIT == 32
#define	LOCKSTAT_HASH_SHIFT	2
#endif

#define	LOCKSTAT_MINBUFS	100
#define	LOCKSTAT_DEFBUFS	1000
#define	LOCKSTAT_MAXBUFS	10000

#define	LOCKSTAT_HASH_SIZE	64
#define	LOCKSTAT_HASH_MASK	(LOCKSTAT_HASH_SIZE - 1)
#define	LOCKSTAT_HASH(key)	\
	((key >> LOCKSTAT_HASH_SHIFT) & LOCKSTAT_HASH_MASK)
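
/*
 * Worked example (illustrative): on an LP64 machine the shift is 3, so
 * keys 0x1000 and 0x1040 hash to ((0x1000 >> 3) & 63) == 0 and
 * ((0x1040 >> 3) & 63) == 8.  The shift discards low bits that are always
 * zero due to pointer alignment, spreading nearby locks across buckets;
 * the key used in lockstat_event() is (lock ^ callsite).
 */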

typedef struct lscpu {
	SLIST_HEAD(, lsbuf)	lc_free;
	u_int			lc_overflow;
	LIST_HEAD(lslist, lsbuf) lc_hash[LOCKSTAT_HASH_SIZE];
} lscpu_t;

typedef struct lslist	lslist_t;

void	lockstatattach(int);
void	lockstat_start(lsenable_t *);
int	lockstat_alloc(lsenable_t *);
void	lockstat_init_tables(lsenable_t *);
int	lockstat_stop(lsdisable_t *);
void	lockstat_free(void);

dev_type_open(lockstat_open);
dev_type_close(lockstat_close);
dev_type_read(lockstat_read);
dev_type_ioctl(lockstat_ioctl);

/* Protected against write by lockstat_lock().  Used by lockstat_event(). */
volatile u_int	lockstat_enabled;
uintptr_t	lockstat_csstart;
uintptr_t	lockstat_csend;
uintptr_t	lockstat_csmask;
uintptr_t	lockstat_lockaddr;

/* Protected by lockstat_lock(). */
struct simplelock	lockstat_slock;
lsbuf_t			*lockstat_baseb;
size_t			lockstat_sizeb;
int			lockstat_busy;
int			lockstat_devopen;
struct timespec		lockstat_stime;

const struct cdevsw lockstat_cdevsw = {
	lockstat_open, lockstat_close, lockstat_read, nowrite, lockstat_ioctl,
	nostop, notty, nopoll, nommap, nokqfilter, 0
};

MALLOC_DEFINE(M_LOCKSTAT, "lockstat", "lockstat event buffers");

/*
 * Called when the pseudo-driver is attached.
 */
void
lockstatattach(int nunits)
{

	(void)nunits;

	__cpu_simple_lock_init(&lockstat_slock.lock_data);
}

/*
 * Grab the global lock.  If busy is set, we want to block out operations
 * on the control device.
 */
static inline int
lockstat_lock(int busy)
{

	if (!__cpu_simple_lock_try(&lockstat_slock.lock_data))
		return (EBUSY);
	if (busy) {
		if (lockstat_busy) {
			__cpu_simple_unlock(&lockstat_slock.lock_data);
			return (EBUSY);
		}
		lockstat_busy = 1;
	}
	KASSERT(lockstat_busy);

	return 0;
}

/*
 * Release the global lock.  If unbusy is set, we want to allow new
 * operations on the control device.
 */
static inline void
lockstat_unlock(int unbusy)
{

	KASSERT(lockstat_busy);
	if (unbusy)
		lockstat_busy = 0;
	__cpu_simple_unlock(&lockstat_slock.lock_data);
}

/*
 * Prepare the per-CPU tables for use, or clear down tables when tracing is
 * stopped.
 */
void
lockstat_init_tables(lsenable_t *le)
{
	int i, ncpu, per, slop, cpuno;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lscpu_t *lc;
	lsbuf_t *lb;

	KASSERT(!lockstat_enabled);

	ncpu = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_lockstat != NULL) {
			free(ci->ci_lockstat, M_LOCKSTAT);
			ci->ci_lockstat = NULL;
		}
		ncpu++;
	}

	if (le == NULL)
		return;

	lb = lockstat_baseb;
	per = le->le_nbufs / ncpu;
	slop = le->le_nbufs - (per * ncpu);
	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		lc = malloc(sizeof(*lc), M_LOCKSTAT, M_WAITOK);
		lc->lc_overflow = 0;
		ci->ci_lockstat = lc;

		SLIST_INIT(&lc->lc_free);
		for (i = 0; i < LOCKSTAT_HASH_SIZE; i++)
			LIST_INIT(&lc->lc_hash[i]);

		for (i = per; i != 0; i--, lb++) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
		}
		/*
		 * slop-- (not --slop): give one extra buffer to each of
		 * the first `slop' CPUs, so all le_nbufs buffers are used.
		 */
		if (slop-- > 0) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
			lb++;
		}
		cpuno++;
	}
}
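
/*
 * Worked example (illustrative): with le_nbufs = 1000 and ncpu = 3,
 * per = 333 and slop = 1, so each CPU's free list gets 333 buffers and
 * the first CPU gets one extra, consuming all 1000 lsbuf_t records from
 * the contiguous array at lockstat_baseb.
 */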

/*
 * Start collecting lock statistics.
 */
void
lockstat_start(lsenable_t *le)
{

	KASSERT(!lockstat_enabled);

	lockstat_init_tables(le);

	if ((le->le_flags & LE_CALLSITE) != 0)
		lockstat_csmask = (uintptr_t)-1LL;
	else
		lockstat_csmask = 0;

	lockstat_csstart = le->le_csstart;
	lockstat_csend = le->le_csend;
	lockstat_lockaddr = le->le_lock;

	/*
	 * Force a write barrier.  XXX This may not be sufficient..
	 */
	lockstat_unlock(0);
	tsleep(&lockstat_start, PPAUSE, "lockstat", mstohz(10));
	(void)lockstat_lock(0);

	getnanotime(&lockstat_stime);
	lockstat_enabled = le->le_mask;
	lockstat_unlock(0);
	(void)lockstat_lock(0);
}

/*
 * Stop collecting lock statistics.
 */
int
lockstat_stop(lsdisable_t *ld)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int cpuno, overflow;
	struct timespec ts;
	int error;

	KASSERT(lockstat_enabled);

	/*
	 * Set enabled false, force a write barrier, and wait for other CPUs
	 * to exit lockstat_event().  XXX This may not be sufficient..
	 */
	lockstat_enabled = 0;
	lockstat_unlock(0);
	getnanotime(&ts);
	tsleep(&lockstat_stop, PPAUSE, "lockstat", mstohz(10));
	(void)lockstat_lock(0);

	/*
	 * Did we run out of buffers while tracing?
	 */
	overflow = 0;
	for (CPU_INFO_FOREACH(cii, ci))
		overflow += ((lscpu_t *)ci->ci_lockstat)->lc_overflow;

	if (overflow != 0) {
		error = EOVERFLOW;
		log(LOG_NOTICE, "lockstat: %u buffer allocations failed\n",
		    overflow);
	} else
		error = 0;

	lockstat_init_tables(NULL);

	if (ld == NULL)
		return (error);

	/*
	 * Fill out the disable struct for the caller.
	 */
	timespecsub(&ts, &lockstat_stime, &ld->ld_time);
	ld->ld_size = lockstat_sizeb;

	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		/* >= (not >): ld_freq has exactly that many entries. */
		if (cpuno >= sizeof(ld->ld_freq) / sizeof(ld->ld_freq[0])) {
			log(LOG_WARNING, "lockstat: too many CPUs\n");
			break;
		}
		ld->ld_freq[cpuno++] = cpu_frequency(ci);
	}

	return (error);
}

/*
 * Allocate buffers for lockstat_start().
 */
int
lockstat_alloc(lsenable_t *le)
{
	lsbuf_t *lb;
	size_t sz;

	KASSERT(!lockstat_enabled);
	lockstat_free();

	sz = sizeof(*lb) * le->le_nbufs;

	lockstat_unlock(0);
	lb = malloc(sz, M_LOCKSTAT, M_WAITOK | M_ZERO);
	(void)lockstat_lock(0);

	if (lb == NULL)
		return (ENOMEM);

	KASSERT(!lockstat_enabled);
	KASSERT(lockstat_baseb == NULL);
	lockstat_sizeb = sz;
	lockstat_baseb = lb;

	return (0);
}

/*
 * Free allocated buffers after tracing has stopped.
 */
void
lockstat_free(void)
{

	KASSERT(!lockstat_enabled);

	if (lockstat_baseb != NULL) {
		free(lockstat_baseb, M_LOCKSTAT);
		lockstat_baseb = NULL;
	}
}

/*
 * Main entry point from lock primitives.
 */
void
lockstat_event(uintptr_t lock, uintptr_t callsite, u_int flags, u_int count,
    uint64_t time)
{
	lslist_t *ll;
	lscpu_t *lc;
	lsbuf_t *lb;
	u_int event;
	int s;

	if ((flags & lockstat_enabled) != flags || count == 0)
		return;
	if (lockstat_lockaddr != 0 && lock != lockstat_lockaddr)
		return;
	if (callsite < lockstat_csstart || callsite > lockstat_csend)
		return;

	callsite &= lockstat_csmask;

	/*
	 * Find the table for this lock+callsite pair, and try to locate a
	 * buffer with the same key.
	 */
	lc = curcpu()->ci_lockstat;
	ll = &lc->lc_hash[LOCKSTAT_HASH(lock ^ callsite)];
	event = (flags & LB_EVENT_MASK) - 1;
	s = spllock();

	LIST_FOREACH(lb, ll, lb_chain.list) {
		if (lb->lb_lock == lock && lb->lb_callsite == callsite)
			break;
	}

	if (lb != NULL) {
		/*
		 * We found a record.  Move it to the front of the list, as
		 * we're likely to hit it again soon.
		 */
		if (lb != LIST_FIRST(ll)) {
			LIST_REMOVE(lb, lb_chain.list);
			LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		}
		lb->lb_counts[event] += count;
		lb->lb_times[event] += time;
	} else if ((lb = SLIST_FIRST(&lc->lc_free)) != NULL) {
		/*
		 * Pinch a new buffer and fill it out.
		 */
		SLIST_REMOVE_HEAD(&lc->lc_free, lb_chain.slist);
		LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		lb->lb_flags = (uint16_t)flags;
		lb->lb_lock = lock;
		lb->lb_callsite = callsite;
		lb->lb_counts[event] = count;
		lb->lb_times[event] = time;
	} else {
		/*
		 * We didn't find a buffer and there were none free.
		 * lockstat_stop() will notice later on and report the
		 * error.
		 */
		lc->lc_overflow++;
	}

	splx(s);
}
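
/*
 * Illustrative sketch (assumed caller, not part of this file): a lock
 * primitive that spun `count' times for a total of `time' cycles might
 * report the event roughly like this, passing one lock-type bit and one
 * event bit from <dev/lockstat.h> in `flags':
 *
 *	lockstat_event((uintptr_t)lock,
 *	    (uintptr_t)__builtin_return_address(0),
 *	    flags, count, time);
 *
 * The event bit selects the lb_counts[]/lb_times[] slot via
 * (flags & LB_EVENT_MASK) - 1, as computed above.
 */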

/*
 * Accept an open() on /dev/lockstat.
 */
int
lockstat_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	int error;

	if ((error = lockstat_lock(1)) != 0)
		return error;

	if (lockstat_devopen)
		error = EBUSY;
	else {
		lockstat_devopen = 1;
		error = 0;
	}

	lockstat_unlock(1);

	return error;
}

/*
 * Accept the last close() on /dev/lockstat.
 */
int
lockstat_close(dev_t dev, int flag, int mode, struct lwp *l)
{
	int error;

	if ((error = lockstat_lock(1)) == 0) {
		if (lockstat_enabled)
			(void)lockstat_stop(NULL);
		lockstat_free();
		lockstat_devopen = 0;
		lockstat_unlock(1);
	}

	return error;
}

/*
 * Handle control operations.
 */
int
lockstat_ioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	lsenable_t *le;
	int error;

	if ((error = lockstat_lock(1)) != 0)
		return error;

	switch (cmd) {
	case IOC_LOCKSTAT_GVERSION:
		*(int *)data = LS_VERSION;
		error = 0;
		break;

	case IOC_LOCKSTAT_ENABLE:
		le = (lsenable_t *)data;

		if (!cpu_hascounter()) {
			error = ENODEV;
			break;
		}
		if (lockstat_enabled) {
			error = EBUSY;
			break;
		}

		/*
		 * Sanitize the arguments passed in and set up filtering.
		 */
		if (le->le_nbufs == 0)
			le->le_nbufs = LOCKSTAT_DEFBUFS;
		else if (le->le_nbufs > LOCKSTAT_MAXBUFS ||
		    le->le_nbufs < LOCKSTAT_MINBUFS) {
			error = EINVAL;
			break;
		}
		if ((le->le_flags & LE_ONE_CALLSITE) == 0) {
			le->le_csstart = 0;
			le->le_csend = le->le_csstart - 1;
		}
		if ((le->le_flags & LE_ONE_LOCK) == 0)
			le->le_lock = 0;
		if ((le->le_mask & LB_EVENT_MASK) == 0 ||
		    (le->le_mask & LB_LOCK_MASK) == 0) {
			/* Break, not return: the global lock is held. */
			error = EINVAL;
			break;
		}

		/*
		 * Start tracing.
		 */
		if ((error = lockstat_alloc(le)) == 0)
			lockstat_start(le);
		break;

	case IOC_LOCKSTAT_DISABLE:
		if (!lockstat_enabled)
			error = EINVAL;
		else
			error = lockstat_stop((lsdisable_t *)data);
		break;

	default:
		error = ENOTTY;
		break;
	}

	lockstat_unlock(1);
	return error;
}

/*
 * Copy buffers out to user-space.
 */
int
lockstat_read(dev_t dev, struct uio *uio, int flag)
{
	int error;

	if ((error = lockstat_lock(1)) != 0)
		return (error);

	if (lockstat_enabled) {
		lockstat_unlock(1);
		return (EBUSY);
	}

	lockstat_unlock(0);
	error = uiomove(lockstat_baseb, lockstat_sizeb, uio);
	(void)lockstat_lock(0);

	lockstat_unlock(1);

	return (error);
}