/*	$NetBSD: lockstat.c,v 1.27 2020/05/23 23:42:42 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lock statistics driver, providing kernel support for the lockstat(8)
 * command.
 *
 * We use a global lock word (lockstat_lock) to track device opens.
 * Only one thread may have the device open at a time, which acts as a
 * global lock around tracing.
 *
 * XXX Timings for contention on sleep locks are currently incorrect.
 * XXX Convert this to use timecounters!
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lockstat.c,v 1.27 2020/05/23 23:42:42 ad Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

#include "ioconf.h"

#ifndef __HAVE_CPU_COUNTER
#error CPU counters not available
#endif

#if LONG_BIT == 64
#define	LOCKSTAT_HASH_SHIFT	3
#elif LONG_BIT == 32
#define	LOCKSTAT_HASH_SHIFT	2
#endif

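/*
 * Bounds on the number of event buffers that may be requested.  If the
 * caller passes zero, the default is LOCKSTAT_DEFBUFS per CPU, capped
 * at LOCKSTAT_MAXBUFS.
 */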
#define	LOCKSTAT_MINBUFS	1000
#define	LOCKSTAT_DEFBUFS	20000
#define	LOCKSTAT_MAXBUFS	1000000

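/*
 * Hash on the lock/callsite key to pick one of LOCKSTAT_HASH_SIZE
 * buckets.  The shift discards the low bits of the key, which are
 * always zero due to pointer alignment (8-byte on LP64, 4-byte on
 * ILP32).
 */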
#define	LOCKSTAT_HASH_SIZE	128
#define	LOCKSTAT_HASH_MASK	(LOCKSTAT_HASH_SIZE - 1)
#define	LOCKSTAT_HASH(key)	\
	((key >> LOCKSTAT_HASH_SHIFT) & LOCKSTAT_HASH_MASK)

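/*
 * Per-CPU state: a free list of unused buffers, a count of events that
 * were dropped because no free buffer was available, and a hash table
 * of in-use buffers keyed on lock/callsite.
 */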
typedef struct lscpu {
	SLIST_HEAD(, lsbuf) lc_free;
	u_int lc_overflow;
	LIST_HEAD(lslist, lsbuf) lc_hash[LOCKSTAT_HASH_SIZE];
} lscpu_t;

typedef struct lslist lslist_t;

void	lockstat_start(lsenable_t *);
int	lockstat_alloc(lsenable_t *);
void	lockstat_init_tables(lsenable_t *);
int	lockstat_stop(lsdisable_t *);
void	lockstat_free(void);

dev_type_open(lockstat_open);
dev_type_close(lockstat_close);
dev_type_read(lockstat_read);
dev_type_ioctl(lockstat_ioctl);

volatile u_int	lockstat_enabled;
volatile u_int	lockstat_dev_enabled;
uintptr_t	lockstat_csstart;
uintptr_t	lockstat_csend;
uintptr_t	lockstat_csmask;
uintptr_t	lockstat_lamask;
uintptr_t	lockstat_lockstart;
uintptr_t	lockstat_lockend;
__cpu_simple_lock_t lockstat_lock;
lwp_t		*lockstat_lwp;
lsbuf_t		*lockstat_baseb;
size_t		lockstat_sizeb;
int		lockstat_busy;
struct timespec	lockstat_stime;

#ifdef KDTRACE_HOOKS
volatile u_int lockstat_dtrace_enabled;
CTASSERT(LB_NEVENT <= 3);
CTASSERT(LB_NLOCK <= (7 << LB_LOCK_SHIFT));
void
lockstat_probe_stub(uint32_t id, uintptr_t lock, uintptr_t callsite,
    uintptr_t flags, uintptr_t count, uintptr_t cycles)
{
}

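/*
 * Table of active DTrace probe IDs, indexed by compressed probe number,
 * and the probe handler.  The DTrace lockstat provider is expected to
 * replace the stub with its own handler while probes are enabled.
 */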
uint32_t	lockstat_probemap[LS_NPROBES];
void		(*lockstat_probe_func)(uint32_t, uintptr_t, uintptr_t,
    uintptr_t, uintptr_t, uintptr_t) = &lockstat_probe_stub;
#endif

const struct cdevsw lockstat_cdevsw = {
	.d_open = lockstat_open,
	.d_close = lockstat_close,
	.d_read = lockstat_read,
	.d_write = nowrite,
	.d_ioctl = lockstat_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/*
 * Called when the pseudo-driver is attached.
 */
void
lockstatattach(int nunits)
{

	(void)nunits;

	__cpu_simple_lock_init(&lockstat_lock);
}

/*
 * Prepare the per-CPU tables for use, or clear down tables when tracing is
 * stopped.
 */
void
lockstat_init_tables(lsenable_t *le)
{
	int i, per, slop, cpuno;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lscpu_t *lc;
	lsbuf_t *lb;

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_lockstat != NULL) {
			kmem_free(ci->ci_lockstat, sizeof(lscpu_t));
			ci->ci_lockstat = NULL;
		}
	}

	if (le == NULL)
		return;

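	/*
	 * Distribute the buffers evenly among the CPUs; any remainder
	 * ("slop") is handed out one extra buffer per CPU.
	 */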
	lb = lockstat_baseb;
	per = le->le_nbufs / ncpu;
	slop = le->le_nbufs - (per * ncpu);
	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		lc = kmem_alloc(sizeof(*lc), KM_SLEEP);
		lc->lc_overflow = 0;
		ci->ci_lockstat = lc;

		SLIST_INIT(&lc->lc_free);
		for (i = 0; i < LOCKSTAT_HASH_SIZE; i++)
			LIST_INIT(&lc->lc_hash[i]);

		for (i = per; i != 0; i--, lb++) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
		}
		if (--slop > 0) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
			lb++;
		}
		cpuno++;
	}
}

/*
 * Start collecting lock statistics.
 */
void
lockstat_start(lsenable_t *le)
{

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	lockstat_init_tables(le);

	if ((le->le_flags & LE_CALLSITE) != 0)
		lockstat_csmask = (uintptr_t)-1LL;
	else
		lockstat_csmask = 0;

	if ((le->le_flags & LE_LOCK) != 0)
		lockstat_lamask = (uintptr_t)-1LL;
	else
		lockstat_lamask = 0;

	lockstat_csstart = le->le_csstart;
	lockstat_csend = le->le_csend;
	lockstat_lockstart = le->le_lockstart;
	lockstat_lockend = le->le_lockend;
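	/*
	 * Publish the filter settings above before any other CPU can
	 * observe the event mask as non-zero.
	 */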
	membar_sync();
	getnanotime(&lockstat_stime);
	lockstat_dev_enabled = le->le_mask;
	LOCKSTAT_ENABLED_UPDATE();
}

/*
 * Stop collecting lock statistics.
 */
int
lockstat_stop(lsdisable_t *ld)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int cpuno, overflow;
	struct timespec ts;
	int error;
	lwp_t *l;

	/* coverity[assert_side_effect] */
	KASSERT(lockstat_dev_enabled);

	/*
	 * Set enabled false, force a write barrier, and wait for other CPUs
	 * to exit lockstat_event().
	 */
	lockstat_dev_enabled = 0;
	LOCKSTAT_ENABLED_UPDATE();
	getnanotime(&ts);
	tsleep(&lockstat_stop, PPAUSE, "lockstat", mstohz(10));

	/*
	 * Did we run out of buffers while tracing?
	 */
	overflow = 0;
	for (CPU_INFO_FOREACH(cii, ci))
		overflow += ((lscpu_t *)ci->ci_lockstat)->lc_overflow;

	if (overflow != 0) {
		error = EOVERFLOW;
		log(LOG_NOTICE, "lockstat: %d buffer allocations failed\n",
		    overflow);
	} else
		error = 0;

	lockstat_init_tables(NULL);

	/* Run through all LWPs and clear the slate for the next run. */
	mutex_enter(&proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		l->l_pfailaddr = 0;
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
	}
	mutex_exit(&proc_lock);

	if (ld == NULL)
		return error;

	/*
	 * Fill out the disable struct for the caller.
	 */
	timespecsub(&ts, &lockstat_stime, &ld->ld_time);
	ld->ld_size = lockstat_sizeb;

	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpuno >= sizeof(ld->ld_freq) / sizeof(ld->ld_freq[0])) {
			log(LOG_WARNING, "lockstat: too many CPUs\n");
			break;
		}
		ld->ld_freq[cpuno++] = cpu_frequency(ci);
	}

	return error;
}

/*
 * Allocate buffers for lockstat_start().
 */
int
lockstat_alloc(lsenable_t *le)
{
	lsbuf_t *lb;
	size_t sz;

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);
	lockstat_free();

	sz = sizeof(*lb) * le->le_nbufs;

	lb = kmem_zalloc(sz, KM_SLEEP);

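	/*
	 * kmem_zalloc() may have slept; re-check that tracing was not
	 * enabled in the meantime.
	 */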
	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);
	KASSERT(lockstat_baseb == NULL);
	lockstat_sizeb = sz;
	lockstat_baseb = lb;

	return (0);
}

/*
 * Free allocated buffers after tracing has stopped.
 */
void
lockstat_free(void)
{

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	if (lockstat_baseb != NULL) {
		kmem_free(lockstat_baseb, lockstat_sizeb);
		lockstat_baseb = NULL;
	}
}

/*
 * Main entry point from the lock primitives.
 */
void
lockstat_event(uintptr_t lock, uintptr_t callsite, u_int flags, u_int count,
    uint64_t cycles)
{
	lslist_t *ll;
	lscpu_t *lc;
	lsbuf_t *lb;
	u_int event;
	int s;

#ifdef KDTRACE_HOOKS
	uint32_t id;
	CTASSERT((LS_NPROBES & (LS_NPROBES - 1)) == 0);
	if ((id = lockstat_probemap[LS_COMPRESS(flags)]) != 0)
		(*lockstat_probe_func)(id, lock, callsite, flags, count,
		    cycles);
#endif

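	/*
	 * Discard events that are not selected by the enabled mask, or
	 * that fall outside the configured lock/callsite address ranges.
	 */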
	if ((flags & lockstat_dev_enabled) != flags || count == 0)
		return;
	if (lock < lockstat_lockstart || lock > lockstat_lockend)
		return;
	if (callsite < lockstat_csstart || callsite > lockstat_csend)
		return;

	callsite &= lockstat_csmask;
	lock &= lockstat_lamask;

	/*
	 * Find the table for this lock+callsite pair, and try to locate a
	 * buffer with the same key.
	 */
	s = splhigh();
	lc = curcpu()->ci_lockstat;
	ll = &lc->lc_hash[LOCKSTAT_HASH(lock ^ callsite)];
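	/*
	 * Event codes in the flags word are 1-based; convert to a 0-based
	 * index into the per-buffer count/time arrays.
	 */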
	event = (flags & LB_EVENT_MASK) - 1;

	LIST_FOREACH(lb, ll, lb_chain.list) {
		if (lb->lb_lock == lock && lb->lb_callsite == callsite)
			break;
	}

	if (lb != NULL) {
		/*
		 * We found a record.  Move it to the front of the list, as
		 * we're likely to hit it again soon.
		 */
		if (lb != LIST_FIRST(ll)) {
			LIST_REMOVE(lb, lb_chain.list);
			LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		}
		lb->lb_counts[event] += count;
		lb->lb_times[event] += cycles;
	} else if ((lb = SLIST_FIRST(&lc->lc_free)) != NULL) {
		/*
		 * Pinch a new buffer and fill it out.
		 */
		SLIST_REMOVE_HEAD(&lc->lc_free, lb_chain.slist);
		LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		lb->lb_flags = (uint16_t)flags;
		lb->lb_lock = lock;
		lb->lb_callsite = callsite;
		lb->lb_counts[event] = count;
		lb->lb_times[event] = cycles;
	} else {
		/*
		 * We didn't find a buffer and there were none free.
		 * lockstat_stop() will notice later on and report the
		 * error.
		 */
		lc->lc_overflow++;
	}

	splx(s);
}

/*
 * Accept an open() on /dev/lockstat.
 */
int
lockstat_open(dev_t dev, int flag, int mode, lwp_t *l)
{

	if (!__cpu_simple_lock_try(&lockstat_lock))
		return EBUSY;
	lockstat_lwp = curlwp;
	return 0;
}

/*
 * Accept the last close() on /dev/lockstat.
 */
int
lockstat_close(dev_t dev, int flag, int mode, lwp_t *l)
{

	lockstat_lwp = NULL;
	if (lockstat_dev_enabled) {
		lockstat_stop(NULL);
		lockstat_free();
	}
	__cpu_simple_unlock(&lockstat_lock);
	return 0;
}

/*
 * Handle control operations.
 */
int
lockstat_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	lsenable_t *le;
	int error;

	if (lockstat_lwp != curlwp)
		return EBUSY;

	switch (cmd) {
	case IOC_LOCKSTAT_GVERSION:
		*(int *)data = LS_VERSION;
		error = 0;
		break;

	case IOC_LOCKSTAT_ENABLE:
		le = (lsenable_t *)data;

		if (!cpu_hascounter()) {
			error = ENODEV;
			break;
		}
		if (lockstat_dev_enabled) {
			error = EBUSY;
			break;
		}

		/*
		 * Sanitize the arguments passed in and set up filtering.
		 */
		if (le->le_nbufs == 0) {
			le->le_nbufs = MIN(LOCKSTAT_DEFBUFS * ncpu,
			    LOCKSTAT_MAXBUFS);
		} else if (le->le_nbufs > LOCKSTAT_MAXBUFS ||
		    le->le_nbufs < LOCKSTAT_MINBUFS) {
			error = EINVAL;
			break;
		}
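		/*
		 * With no single-callsite or single-lock filter, accept
		 * everything: start each range at zero and let the end
		 * wrap around to the largest possible address.
		 */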
		if ((le->le_flags & LE_ONE_CALLSITE) == 0) {
			le->le_csstart = 0;
			le->le_csend = le->le_csstart - 1;
		}
		if ((le->le_flags & LE_ONE_LOCK) == 0) {
			le->le_lockstart = 0;
			le->le_lockend = le->le_lockstart - 1;
		}
		if ((le->le_mask & LB_EVENT_MASK) == 0)
			return EINVAL;
		if ((le->le_mask & LB_LOCK_MASK) == 0)
			return EINVAL;

		/*
		 * Start tracing.
		 */
		if ((error = lockstat_alloc(le)) == 0)
			lockstat_start(le);
		break;

	case IOC_LOCKSTAT_DISABLE:
		if (!lockstat_dev_enabled)
			error = EINVAL;
		else
			error = lockstat_stop((lsdisable_t *)data);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}

/*
 * Copy buffers out to user-space.
 */
int
lockstat_read(dev_t dev, struct uio *uio, int flag)
{

	if (curlwp != lockstat_lwp || lockstat_dev_enabled)
		return EBUSY;
	return uiomove(lockstat_baseb, lockstat_sizeb, uio);
}