/*	$NetBSD: tprof.c,v 1.13 2015/08/20 14:40:18 christos Exp $	*/

/*-
 * Copyright (c)2008,2009,2010 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.13 2015/08/20 14:40:18 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/queue.h>

#include <dev/tprof/tprof.h>
#include <dev/tprof/tprof_ioctl.h>

#include "ioconf.h"

/*
 * locking order:
 *	tprof_reader_lock -> tprof_lock
 *	tprof_startstop_lock -> tprof_lock
 */

/*
 * protected by:
 *	L: tprof_lock
 *	R: tprof_reader_lock
 *	S: tprof_startstop_lock
 *	s: writer should hold tprof_startstop_lock and tprof_lock
 *	   reader should hold tprof_startstop_lock or tprof_lock
 */

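/*
 * tprof_buf: a chunk of profiling samples.  each cpu owns one buffer at a
 * time; filled buffers are moved to the global tprof_list by tprof_worker
 * and handed to userland via read(2).
 */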
typedef struct tprof_buf {
	u_int b_used;
	u_int b_size;
	u_int b_overflow;
	u_int b_unused;
	STAILQ_ENTRY(tprof_buf) b_list;
	tprof_sample_t b_data[];
} tprof_buf_t;
#define	TPROF_BUF_BYTESIZE(sz) \
	(sizeof(tprof_buf_t) + (sz) * sizeof(tprof_sample_t))
#define	TPROF_MAX_SAMPLES_PER_BUF	10000

#define	TPROF_MAX_BUF			100

typedef struct {
	tprof_buf_t *c_buf;
	uint32_t c_cpuid;
	struct work c_work;
	callout_t c_callout;
} __aligned(CACHE_LINE_SIZE) tprof_cpu_t;

typedef struct tprof_backend {
	const char *tb_name;
	const tprof_backend_ops_t *tb_ops;
	LIST_ENTRY(tprof_backend) tb_list;
	int tb_usecount;	/* S: */
} tprof_backend_t;

static kmutex_t tprof_lock;
static bool tprof_running;		/* s: */
static u_int tprof_nworker;		/* L: # of running worker LWPs */
static lwp_t *tprof_owner;
static STAILQ_HEAD(, tprof_buf) tprof_list;	/* L: global buffer list */
static u_int tprof_nbuf_on_list;	/* L: # of buffers on tprof_list */
static struct workqueue *tprof_wq;
static tprof_cpu_t tprof_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
static u_int tprof_samples_per_buf;

static tprof_backend_t *tprof_backend;	/* S: */
static LIST_HEAD(, tprof_backend) tprof_backends =
    LIST_HEAD_INITIALIZER(tprof_backend);	/* S: */

static kmutex_t tprof_reader_lock;
static kcondvar_t tprof_reader_cv;	/* L: */
static off_t tprof_reader_offset;	/* R: */

static kmutex_t tprof_startstop_lock;
static kcondvar_t tprof_cv;		/* L: */

static struct tprof_stat tprof_stat;	/* L: */

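/*
 * tprof_cpu: return the per-cpu state for the given cpu.
 */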
static tprof_cpu_t *
tprof_cpu(struct cpu_info *ci)
{

	return &tprof_cpus[cpu_index(ci)];
}

static tprof_cpu_t *
tprof_curcpu(void)
{

	return tprof_cpu(curcpu());
}

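/*
 * tprof_buf_alloc: allocate an empty sample buffer, sized according to
 * tprof_samples_per_buf.
 */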
static tprof_buf_t *
tprof_buf_alloc(void)
{
	tprof_buf_t *new;
	u_int size = tprof_samples_per_buf;

	new = kmem_alloc(TPROF_BUF_BYTESIZE(size), KM_SLEEP);
	new->b_used = 0;
	new->b_size = size;
	new->b_overflow = 0;
	return new;
}

static void
tprof_buf_free(tprof_buf_t *buf)
{

	kmem_free(buf, TPROF_BUF_BYTESIZE(buf->b_size));
}

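/*
 * tprof_buf_switch: install "new" as the cpu's current buffer and return
 * the previous one.
 */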
static tprof_buf_t *
tprof_buf_switch(tprof_cpu_t *c, tprof_buf_t *new)
{
	tprof_buf_t *old;

	old = c->c_buf;
	c->c_buf = new;
	return old;
}

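/*
 * tprof_buf_refresh: give the current cpu a fresh buffer and return the
 * old (possibly filled) one.
 */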
static tprof_buf_t *
tprof_buf_refresh(void)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *new;

	new = tprof_buf_alloc();
	return tprof_buf_switch(c, new);
}

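/*
 * tprof_worker: per-cpu worker, run via workqueue roughly once a second;
 * hands the cpu's filled buffer over to the global tprof_list for read(2)
 * (or drops it if the list is full) and re-arms the callout unless
 * profiling has been stopped.
 */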
static void
tprof_worker(struct work *wk, void *dummy)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *buf;
	bool shouldstop;

	KASSERT(wk == &c->c_work);
	KASSERT(dummy == NULL);

	/*
	 * get a per cpu buffer.
	 */
	buf = tprof_buf_refresh();

	/*
	 * and put it on the global list for read(2).
	 */
	mutex_enter(&tprof_lock);
	shouldstop = !tprof_running;
	if (shouldstop) {
		KASSERT(tprof_nworker > 0);
		tprof_nworker--;
		cv_broadcast(&tprof_cv);
		cv_broadcast(&tprof_reader_cv);
	}
	if (buf->b_used == 0) {
		tprof_stat.ts_emptybuf++;
	} else if (tprof_nbuf_on_list < TPROF_MAX_BUF) {
		tprof_stat.ts_sample += buf->b_used;
		tprof_stat.ts_overflow += buf->b_overflow;
		tprof_stat.ts_buf++;
		STAILQ_INSERT_TAIL(&tprof_list, buf, b_list);
		tprof_nbuf_on_list++;
		buf = NULL;
		cv_broadcast(&tprof_reader_cv);
	} else {
		tprof_stat.ts_dropbuf_sample += buf->b_used;
		tprof_stat.ts_dropbuf++;
	}
	mutex_exit(&tprof_lock);
	if (buf) {
		tprof_buf_free(buf);
	}
	if (!shouldstop) {
		callout_schedule(&c->c_callout, hz);
	}
}

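/*
 * tprof_kick: callout handler; enqueue the per-cpu work on this cpu.
 */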
static void
tprof_kick(void *vp)
{
	struct cpu_info * const ci = vp;
	tprof_cpu_t * const c = tprof_cpu(ci);

	workqueue_enqueue(tprof_wq, &c->c_work, ci);
}

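/*
 * tprof_stop1: tear down per-cpu buffers, callouts and the workqueue.
 * called after all workers have exited (or if starting the backend failed).
 */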
static void
tprof_stop1(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	KASSERT(tprof_nworker == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *old;

		old = tprof_buf_switch(c, NULL);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_destroy(&c->c_callout);
	}
	workqueue_destroy(tprof_wq);
}

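/*
 * tprof_start: start profiling with the currently selected backend;
 * allocates per-cpu buffers, creates the worker workqueue and kicks off
 * one worker per cpu.
 */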
static int
tprof_start(const struct tprof_param *param)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int error;
	uint64_t freq;
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (tprof_running) {
		error = EBUSY;
		goto done;
	}

	tb = tprof_backend;
	if (tb == NULL) {
		error = ENOENT;
		goto done;
	}
	if (tb->tb_usecount > 0) {
		error = EBUSY;
		goto done;
	}

	tb->tb_usecount++;
	freq = tb->tb_ops->tbo_estimate_freq();
	tprof_samples_per_buf = MIN(freq * 2, TPROF_MAX_SAMPLES_PER_BUF);

	error = workqueue_create(&tprof_wq, "tprofmv", tprof_worker, NULL,
	    PRI_NONE, IPL_SOFTCLOCK, WQ_MPSAFE | WQ_PERCPU);
	if (error != 0) {
		/* drop the reference taken above so the backend can be reused */
		tb->tb_usecount--;
		goto done;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *new;
		tprof_buf_t *old;

		new = tprof_buf_alloc();
		old = tprof_buf_switch(c, new);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_init(&c->c_callout, CALLOUT_MPSAFE);
		callout_setfunc(&c->c_callout, tprof_kick, ci);
	}

	error = tb->tb_ops->tbo_start(NULL);
	if (error != 0) {
		KASSERT(tb->tb_usecount > 0);
		tb->tb_usecount--;
		tprof_stop1();
		goto done;
	}

	mutex_enter(&tprof_lock);
	tprof_running = true;
	mutex_exit(&tprof_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);

		mutex_enter(&tprof_lock);
		tprof_nworker++;
		mutex_exit(&tprof_lock);
		workqueue_enqueue(tprof_wq, &c->c_work, ci);
	}
done:
	return error;
}

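/*
 * tprof_stop: stop the backend, wait for all per-cpu workers to finish,
 * then release the resources allocated by tprof_start.
 */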
static void
tprof_stop(void)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (!tprof_running) {
		goto done;
	}

	tb = tprof_backend;
	KASSERT(tb->tb_usecount > 0);
	tb->tb_ops->tbo_stop(NULL);
	tb->tb_usecount--;

	mutex_enter(&tprof_lock);
	tprof_running = false;
	cv_broadcast(&tprof_reader_cv);
	while (tprof_nworker > 0) {
		cv_wait(&tprof_cv, &tprof_lock);
	}
	mutex_exit(&tprof_lock);

	tprof_stop1();
done:
	;
}

/*
 * tprof_clear: drain unread samples.
 */

static void
tprof_clear(void)
{
	tprof_buf_t *buf;

	mutex_enter(&tprof_reader_lock);
	mutex_enter(&tprof_lock);
	while ((buf = STAILQ_FIRST(&tprof_list)) != NULL) {
		if (buf != NULL) {
			STAILQ_REMOVE_HEAD(&tprof_list, b_list);
			KASSERT(tprof_nbuf_on_list > 0);
			tprof_nbuf_on_list--;
			mutex_exit(&tprof_lock);
			tprof_buf_free(buf);
			mutex_enter(&tprof_lock);
		}
	}
	KASSERT(tprof_nbuf_on_list == 0);
	mutex_exit(&tprof_lock);
	tprof_reader_offset = 0;
	mutex_exit(&tprof_reader_lock);

	memset(&tprof_stat, 0, sizeof(tprof_stat));
}

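/*
 * tprof_backend_lookup: find a registered backend by name.
 */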
static tprof_backend_t *
tprof_backend_lookup(const char *name)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));

	LIST_FOREACH(tb, &tprof_backends, tb_list) {
		if (!strcmp(tb->tb_name, name)) {
			return tb;
		}
	}
	return NULL;
}

/* -------------------- backend interfaces */

/*
 * tprof_sample: record a sample on the per-cpu buffer.
 *
 * be careful; this can be called in NMI context.
 * we bluntly assume that the following are safe to use there:
 *	curcpu()
 *	curlwp->l_lid
 *	curlwp->l_proc->p_pid
 */

void
tprof_sample(tprof_backend_cookie_t *cookie, const tprof_frame_info_t *tfi)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t * const buf = c->c_buf;
	tprof_sample_t *sp;
	const uintptr_t pc = tfi->tfi_pc;
	const lwp_t * const l = curlwp;
	u_int idx;

	idx = buf->b_used;
	if (__predict_false(idx >= buf->b_size)) {
		buf->b_overflow++;
		return;
	}
	sp = &buf->b_data[idx];
	sp->s_pid = l->l_proc->p_pid;
	sp->s_lwpid = l->l_lid;
	sp->s_cpuid = c->c_cpuid;
	sp->s_flags = (tfi->tfi_inkernel) ? TPROF_SAMPLE_INKERNEL : 0;
	sp->s_pc = pc;
	buf->b_used = idx + 1;
}

/*
 * tprof_backend_register: register a sampling backend under the given name.
 * only a single backend can be registered at a time for now.
 */

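/*
 * A backend (typically an MD performance-counter driver) is expected to
 * call this from its attach/modcmd path, roughly as in the illustrative
 * sketch below.  The ops member names match what this file uses
 * (tbo_estimate_freq, tbo_start, tbo_stop); see the tprof headers for the
 * authoritative definition of tprof_backend_ops_t.  "mybackend" and its
 * functions are hypothetical names.
 *
 *	static const tprof_backend_ops_t mybackend_ops = {
 *		.tbo_estimate_freq = mybackend_estimate_freq,
 *		.tbo_start = mybackend_start,
 *		.tbo_stop = mybackend_stop,
 *	};
 *
 *	error = tprof_backend_register("mybackend", &mybackend_ops,
 *	    TPROF_BACKEND_VERSION);
 */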
int
tprof_backend_register(const char *name, const tprof_backend_ops_t *ops,
    int vers)
{
	tprof_backend_t *tb;

	if (vers != TPROF_BACKEND_VERSION) {
		return EINVAL;
	}

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
	if (tb != NULL) {
		mutex_exit(&tprof_startstop_lock);
		return EEXIST;
	}
#if 1 /* XXX for now */
	if (!LIST_EMPTY(&tprof_backends)) {
		mutex_exit(&tprof_startstop_lock);
		return ENOTSUP;
	}
#endif
	tb = kmem_alloc(sizeof(*tb), KM_SLEEP);
	tb->tb_name = name;
	tb->tb_ops = ops;
	tb->tb_usecount = 0;
	LIST_INSERT_HEAD(&tprof_backends, tb, tb_list);
#if 1 /* XXX for now */
	if (tprof_backend == NULL) {
		tprof_backend = tb;
	}
#endif
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

/*
 * tprof_backend_unregister: unregister the named backend.
 * fails with EBUSY if the backend is currently in use.
 */

int
tprof_backend_unregister(const char *name)
{
	tprof_backend_t *tb;

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
#if defined(DIAGNOSTIC)
	if (tb == NULL) {
		mutex_exit(&tprof_startstop_lock);
		panic("%s: not found '%s'", __func__, name);
	}
#endif /* defined(DIAGNOSTIC) */
	if (tb->tb_usecount > 0) {
		mutex_exit(&tprof_startstop_lock);
		return EBUSY;
	}
#if 1 /* XXX for now */
	if (tprof_backend == tb) {
		tprof_backend = NULL;
	}
#endif
	LIST_REMOVE(tb, tb_list);
	mutex_exit(&tprof_startstop_lock);

	kmem_free(tb, sizeof(*tb));

	return 0;
}

/* -------------------- cdevsw interfaces */

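/*
 * tprof_open: only minor 0 exists, and only a single opener is allowed
 * at a time.
 */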
static int
tprof_open(dev_t dev, int flags, int type, struct lwp *l)
{

	if (minor(dev) != 0) {
		return EXDEV;
	}
	mutex_enter(&tprof_lock);
	if (tprof_owner != NULL) {
		mutex_exit(&tprof_lock);
		return EBUSY;
	}
	tprof_owner = curlwp;
	mutex_exit(&tprof_lock);

	return 0;
}

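/*
 * tprof_close: stop profiling and discard any unread samples when the
 * device is closed.
 */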
static int
tprof_close(dev_t dev, int flags, int type, struct lwp *l)
{

	KASSERT(minor(dev) == 0);

	mutex_enter(&tprof_startstop_lock);
	mutex_enter(&tprof_lock);
	tprof_owner = NULL;
	mutex_exit(&tprof_lock);
	tprof_stop();
	tprof_clear();
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

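/*
 * tprof_read: return raw tprof_sample_t records to userland, consuming
 * buffers from tprof_list; blocks while workers are still running and no
 * samples are available.
 */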
static int
tprof_read(dev_t dev, struct uio *uio, int flags)
{
	tprof_buf_t *buf;
	size_t bytes;
	size_t resid;
	size_t done;
	int error = 0;

	KASSERT(minor(dev) == 0);
	mutex_enter(&tprof_reader_lock);
	while (uio->uio_resid > 0 && error == 0) {
		/*
		 * take the first buffer from the list.
		 */
		mutex_enter(&tprof_lock);
		buf = STAILQ_FIRST(&tprof_list);
		if (buf == NULL) {
			if (tprof_nworker == 0) {
				mutex_exit(&tprof_lock);
				error = 0;
				break;
			}
			mutex_exit(&tprof_reader_lock);
			error = cv_wait_sig(&tprof_reader_cv, &tprof_lock);
			mutex_exit(&tprof_lock);
			mutex_enter(&tprof_reader_lock);
			continue;
		}
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);

		/*
		 * copy it out.
		 */
		bytes = MIN(buf->b_used * sizeof(tprof_sample_t) -
		    tprof_reader_offset, uio->uio_resid);
		resid = uio->uio_resid;
		error = uiomove((char *)buf->b_data + tprof_reader_offset,
		    bytes, uio);
		done = resid - uio->uio_resid;
		tprof_reader_offset += done;

		/*
		 * if we didn't consume the whole buffer,
		 * put it back on the list.
		 */
		if (tprof_reader_offset <
		    buf->b_used * sizeof(tprof_sample_t)) {
			mutex_enter(&tprof_lock);
			STAILQ_INSERT_HEAD(&tprof_list, buf, b_list);
			tprof_nbuf_on_list++;
			cv_broadcast(&tprof_reader_cv);
			mutex_exit(&tprof_lock);
		} else {
			tprof_buf_free(buf);
			tprof_reader_offset = 0;
		}
	}
	mutex_exit(&tprof_reader_lock);

	return error;
}

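/*
 * tprof_ioctl: control interface; see the usage sketch before the cdevsw
 * definition below.
 */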
static int
tprof_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	const struct tprof_param *param;
	int error = 0;

	KASSERT(minor(dev) == 0);

	switch (cmd) {
	case TPROF_IOC_GETVERSION:
		*(int *)data = TPROF_VERSION;
		break;
	case TPROF_IOC_START:
		param = data;
		mutex_enter(&tprof_startstop_lock);
		error = tprof_start(param);
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_STOP:
		mutex_enter(&tprof_startstop_lock);
		tprof_stop();
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_GETSTAT:
		mutex_enter(&tprof_lock);
		memcpy(data, &tprof_stat, sizeof(tprof_stat));
		mutex_exit(&tprof_lock);
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

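/*
 * A typical userland consumer (tprof(8)) is expected to drive this device
 * roughly as sketched below; the device node path is illustrative:
 *
 *	fd = open("/dev/tprof", O_RDONLY);
 *	ioctl(fd, TPROF_IOC_GETVERSION, &version);
 *	ioctl(fd, TPROF_IOC_START, &param);
 *	while (read(fd, samples, sizeof(samples)) > 0)
 *		... parse raw tprof_sample_t records ...
 *	ioctl(fd, TPROF_IOC_STOP, NULL);
 *	ioctl(fd, TPROF_IOC_GETSTAT, &st);
 *	close(fd);
 *
 * note that close() also stops sampling and drops any unread samples.
 */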
const struct cdevsw tprof_cdevsw = {
	.d_open = tprof_open,
	.d_close = tprof_close,
	.d_read = tprof_read,
	.d_write = nowrite,
	.d_ioctl = tprof_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

void
tprofattach(int nunits)
{

	/* nothing */
}

MODULE(MODULE_CLASS_DRIVER, tprof, NULL);

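/*
 * tprof_driver_init: one-time initialization of locks, condvars and the
 * per-cpu state; called from the module init path.
 */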
static void
tprof_driver_init(void)
{
	unsigned int i;

	mutex_init(&tprof_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_reader_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_startstop_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&tprof_cv, "tprof");
	cv_init(&tprof_reader_cv, "tprof_rd");
	STAILQ_INIT(&tprof_list);
	for (i = 0; i < __arraycount(tprof_cpus); i++) {
		tprof_cpu_t * const c = &tprof_cpus[i];

		c->c_buf = NULL;
		c->c_cpuid = i;
	}
}

static void
tprof_driver_fini(void)
{

	mutex_destroy(&tprof_lock);
	mutex_destroy(&tprof_reader_lock);
	mutex_destroy(&tprof_startstop_lock);
	cv_destroy(&tprof_cv);
	cv_destroy(&tprof_reader_cv);
}

static int
tprof_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		tprof_driver_init();
#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = NODEVMAJOR;
			int error;

			error = devsw_attach("tprof", NULL, &bmajor,
			    &tprof_cdevsw, &cmajor);
			if (error) {
				tprof_driver_fini();
				return error;
			}
		}
#endif /* defined(_MODULE) */
		return 0;

	case MODULE_CMD_FINI:
#if defined(_MODULE)
		{
			int error;
			error = devsw_detach(NULL, &tprof_cdevsw);
			if (error) {
				return error;
			}
		}
#endif /* defined(_MODULE) */
		tprof_driver_fini();
		return 0;

	default:
		return ENOTTY;
	}
}