/*	$NetBSD: tprof.c,v 1.13.8.1 2017/04/29 09:17:59 pgoyette Exp $	*/

/*-
 * Copyright (c)2008,2009,2010 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.13.8.1 2017/04/29 09:17:59 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/localcount.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/queue.h>

#include <dev/tprof/tprof.h>
#include <dev/tprof/tprof_ioctl.h>

#include "ioconf.h"

/*
 * locking order:
 *	tprof_reader_lock -> tprof_lock
 *	tprof_startstop_lock -> tprof_lock
 */

/*
 * protected by:
 *	L: tprof_lock
 *	R: tprof_reader_lock
 *	S: tprof_startstop_lock
 *	s: writer should hold tprof_startstop_lock and tprof_lock
 *	   reader should hold tprof_startstop_lock or tprof_lock
 */

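/*
 * overview of the data flow:
 * - the backend calls tprof_sample() from its sampling interrupt (possibly
 *   NMI) context; the sample is appended to the current CPU's buffer.
 * - once a second, tprof_worker() runs on each CPU from a per-CPU
 *   workqueue, installs a fresh buffer and moves the filled one onto the
 *   global tprof_list.
 * - read(2) on the tprof device drains tprof_list and copies the samples
 *   out to userland.
 */
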
typedef struct tprof_buf {
	u_int b_used;
	u_int b_size;
	u_int b_overflow;
	u_int b_unused;
	STAILQ_ENTRY(tprof_buf) b_list;
	tprof_sample_t b_data[];
} tprof_buf_t;
#define	TPROF_BUF_BYTESIZE(sz) \
	(sizeof(tprof_buf_t) + (sz) * sizeof(tprof_sample_t))
#define	TPROF_MAX_SAMPLES_PER_BUF	10000

#define	TPROF_MAX_BUF			100

typedef struct {
	tprof_buf_t *c_buf;
	uint32_t c_cpuid;
	struct work c_work;
	callout_t c_callout;
} __aligned(CACHE_LINE_SIZE) tprof_cpu_t;

typedef struct tprof_backend {
	const char *tb_name;
	const tprof_backend_ops_t *tb_ops;
	LIST_ENTRY(tprof_backend) tb_list;
	int tb_usecount;	/* S: */
} tprof_backend_t;

static kmutex_t tprof_lock;
static bool tprof_running;		/* s: */
static u_int tprof_nworker;		/* L: # of running worker LWPs */
static lwp_t *tprof_owner;
static STAILQ_HEAD(, tprof_buf) tprof_list;	/* L: global buffer list */
static u_int tprof_nbuf_on_list;	/* L: # of buffers on tprof_list */
static struct workqueue *tprof_wq;
static tprof_cpu_t tprof_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
static u_int tprof_samples_per_buf;

static tprof_backend_t *tprof_backend;	/* S: */
static LIST_HEAD(, tprof_backend) tprof_backends =
    LIST_HEAD_INITIALIZER(tprof_backend);	/* S: */

static kmutex_t tprof_reader_lock;
static kcondvar_t tprof_reader_cv;	/* L: */
static off_t tprof_reader_offset;	/* R: */

static kmutex_t tprof_startstop_lock;
static kcondvar_t tprof_cv;		/* L: */

static struct tprof_stat tprof_stat;	/* L: */

static tprof_cpu_t *
tprof_cpu(struct cpu_info *ci)
{

	return &tprof_cpus[cpu_index(ci)];
}

static tprof_cpu_t *
tprof_curcpu(void)
{

	return tprof_cpu(curcpu());
}

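/*
 * buffer helpers.  buffers hold tprof_samples_per_buf samples (sized in
 * tprof_start()); tprof_buf_refresh() installs a fresh buffer on the
 * current CPU and returns the previous one.
 */
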
static tprof_buf_t *
tprof_buf_alloc(void)
{
	tprof_buf_t *new;
	u_int size = tprof_samples_per_buf;

	new = kmem_alloc(TPROF_BUF_BYTESIZE(size), KM_SLEEP);
	new->b_used = 0;
	new->b_size = size;
	new->b_overflow = 0;
	return new;
}

static void
tprof_buf_free(tprof_buf_t *buf)
{

	kmem_free(buf, TPROF_BUF_BYTESIZE(buf->b_size));
}

static tprof_buf_t *
tprof_buf_switch(tprof_cpu_t *c, tprof_buf_t *new)
{
	tprof_buf_t *old;

	old = c->c_buf;
	c->c_buf = new;
	return old;
}

static tprof_buf_t *
tprof_buf_refresh(void)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *new;

	new = tprof_buf_alloc();
	return tprof_buf_switch(c, new);
}

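/*
 * tprof_worker: per-CPU worker, run from the per-CPU workqueue roughly
 * once a second.  it swaps in a fresh buffer, queues the old one on
 * tprof_list for read(2) (or drops it when the list is full), and
 * reschedules its callout unless profiling has been stopped.
 */
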
static void
tprof_worker(struct work *wk, void *dummy)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *buf;
	bool shouldstop;

	KASSERT(wk == &c->c_work);
	KASSERT(dummy == NULL);

	/*
	 * get a per cpu buffer.
	 */
	buf = tprof_buf_refresh();

	/*
	 * and put it on the global list for read(2).
	 */
	mutex_enter(&tprof_lock);
	shouldstop = !tprof_running;
	if (shouldstop) {
		KASSERT(tprof_nworker > 0);
		tprof_nworker--;
		cv_broadcast(&tprof_cv);
		cv_broadcast(&tprof_reader_cv);
	}
	if (buf->b_used == 0) {
		tprof_stat.ts_emptybuf++;
	} else if (tprof_nbuf_on_list < TPROF_MAX_BUF) {
		tprof_stat.ts_sample += buf->b_used;
		tprof_stat.ts_overflow += buf->b_overflow;
		tprof_stat.ts_buf++;
		STAILQ_INSERT_TAIL(&tprof_list, buf, b_list);
		tprof_nbuf_on_list++;
		buf = NULL;
		cv_broadcast(&tprof_reader_cv);
	} else {
		tprof_stat.ts_dropbuf_sample += buf->b_used;
		tprof_stat.ts_dropbuf++;
	}
	mutex_exit(&tprof_lock);
	if (buf) {
		tprof_buf_free(buf);
	}
	if (!shouldstop) {
		callout_schedule(&c->c_callout, hz);
	}
}

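/*
 * tprof_kick: callout handler; enqueue the per-CPU work so that
 * tprof_worker() runs in thread context on the given CPU.
 */
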
static void
tprof_kick(void *vp)
{
	struct cpu_info * const ci = vp;
	tprof_cpu_t * const c = tprof_cpu(ci);

	workqueue_enqueue(tprof_wq, &c->c_work, ci);
}

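/*
 * tprof_stop1: common teardown; free the per-CPU buffers, destroy the
 * callouts and the workqueue.  called with tprof_startstop_lock held,
 * once no workers are running.
 */
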
static void
tprof_stop1(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	KASSERT(tprof_nworker == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *old;

		old = tprof_buf_switch(c, NULL);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_destroy(&c->c_callout);
	}
	workqueue_destroy(tprof_wq);
}

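/*
 * tprof_start: start profiling.  take a reference on the current backend,
 * size the per-CPU buffers for roughly two seconds' worth of samples
 * (capped at TPROF_MAX_SAMPLES_PER_BUF), create the per-CPU workqueue and
 * callouts, start the backend, then kick a worker on each CPU.
 */
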
static int
tprof_start(const struct tprof_param *param)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int error;
	uint64_t freq;
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (tprof_running) {
		error = EBUSY;
		goto done;
	}

	tb = tprof_backend;
	if (tb == NULL) {
		error = ENOENT;
		goto done;
	}
	if (tb->tb_usecount > 0) {
		error = EBUSY;
		goto done;
	}

	tb->tb_usecount++;
	freq = tb->tb_ops->tbo_estimate_freq();
	tprof_samples_per_buf = MIN(freq * 2, TPROF_MAX_SAMPLES_PER_BUF);

	error = workqueue_create(&tprof_wq, "tprofmv", tprof_worker, NULL,
	    PRI_NONE, IPL_SOFTCLOCK, WQ_MPSAFE | WQ_PERCPU);
	if (error != 0) {
		/* drop the backend reference taken above */
		KASSERT(tb->tb_usecount > 0);
		tb->tb_usecount--;
		goto done;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *new;
		tprof_buf_t *old;

		new = tprof_buf_alloc();
		old = tprof_buf_switch(c, new);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_init(&c->c_callout, CALLOUT_MPSAFE);
		callout_setfunc(&c->c_callout, tprof_kick, ci);
	}

	error = tb->tb_ops->tbo_start(NULL);
	if (error != 0) {
		KASSERT(tb->tb_usecount > 0);
		tb->tb_usecount--;
		tprof_stop1();
		goto done;
	}

	mutex_enter(&tprof_lock);
	tprof_running = true;
	mutex_exit(&tprof_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);

		mutex_enter(&tprof_lock);
		tprof_nworker++;
		mutex_exit(&tprof_lock);
		workqueue_enqueue(tprof_wq, &c->c_work, ci);
	}
done:
	return error;
}

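/*
 * tprof_stop: stop profiling.  stop the backend, drop its reference,
 * clear tprof_running and wait until every worker has noticed and exited.
 */
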
static void
tprof_stop(void)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (!tprof_running) {
		goto done;
	}

	tb = tprof_backend;
	KASSERT(tb->tb_usecount > 0);
	tb->tb_ops->tbo_stop(NULL);
	tb->tb_usecount--;

	mutex_enter(&tprof_lock);
	tprof_running = false;
	cv_broadcast(&tprof_reader_cv);
	while (tprof_nworker > 0) {
		cv_wait(&tprof_cv, &tprof_lock);
	}
	mutex_exit(&tprof_lock);

	tprof_stop1();
done:
	;
}

/*
 * tprof_clear: drain unread samples.
 */

static void
tprof_clear(void)
{
	tprof_buf_t *buf;

	mutex_enter(&tprof_reader_lock);
	mutex_enter(&tprof_lock);
	while ((buf = STAILQ_FIRST(&tprof_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);
		tprof_buf_free(buf);
		mutex_enter(&tprof_lock);
	}
	KASSERT(tprof_nbuf_on_list == 0);
	mutex_exit(&tprof_lock);
	tprof_reader_offset = 0;
	mutex_exit(&tprof_reader_lock);

	memset(&tprof_stat, 0, sizeof(tprof_stat));
}

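/*
 * tprof_backend_lookup: find a registered backend by name.
 * called with tprof_startstop_lock held.
 */
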
static tprof_backend_t *
tprof_backend_lookup(const char *name)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));

	LIST_FOREACH(tb, &tprof_backends, tb_list) {
		if (!strcmp(tb->tb_name, name)) {
			return tb;
		}
	}
	return NULL;
}

/* -------------------- backend interfaces */

/*
 * tprof_sample: record a sample on the per-cpu buffer.
 *
 * be careful; can be called in NMI context.
 * we bluntly assume that the following are safe to use there:
 *	curcpu()
 *	curlwp->l_lid
 *	curlwp->l_proc->p_pid
 */

void
tprof_sample(tprof_backend_cookie_t *cookie, const tprof_frame_info_t *tfi)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t * const buf = c->c_buf;
	tprof_sample_t *sp;
	const uintptr_t pc = tfi->tfi_pc;
	const lwp_t * const l = curlwp;
	u_int idx;

	idx = buf->b_used;
	if (__predict_false(idx >= buf->b_size)) {
		buf->b_overflow++;
		return;
	}
	sp = &buf->b_data[idx];
	sp->s_pid = l->l_proc->p_pid;
	sp->s_lwpid = l->l_lid;
	sp->s_cpuid = c->c_cpuid;
	sp->s_flags = (tfi->tfi_inkernel) ? TPROF_SAMPLE_INKERNEL : 0;
	sp->s_pc = pc;
	buf->b_used = idx + 1;
}

/*
 * tprof_backend_register: register a backend, which provides the sampling
 * machinery (tb_ops) and feeds samples to us via tprof_sample().
 * only a single backend can be registered at a time, for now.
 */

int
tprof_backend_register(const char *name, const tprof_backend_ops_t *ops,
    int vers)
{
	tprof_backend_t *tb;

	if (vers != TPROF_BACKEND_VERSION) {
		return EINVAL;
	}

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
	if (tb != NULL) {
		mutex_exit(&tprof_startstop_lock);
		return EEXIST;
	}
#if 1 /* XXX for now */
	if (!LIST_EMPTY(&tprof_backends)) {
		mutex_exit(&tprof_startstop_lock);
		return ENOTSUP;
	}
#endif
	tb = kmem_alloc(sizeof(*tb), KM_SLEEP);
	tb->tb_name = name;
	tb->tb_ops = ops;
	tb->tb_usecount = 0;
	LIST_INSERT_HEAD(&tprof_backends, tb, tb_list);
#if 1 /* XXX for now */
	if (tprof_backend == NULL) {
		tprof_backend = tb;
	}
#endif
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

/*
 * tprof_backend_unregister: remove a previously registered backend.
 * fails with EBUSY if the backend is currently in use.
 */

int
tprof_backend_unregister(const char *name)
{
	tprof_backend_t *tb;

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
#if defined(DIAGNOSTIC)
	if (tb == NULL) {
		mutex_exit(&tprof_startstop_lock);
		panic("%s: not found '%s'", __func__, name);
	}
#endif /* defined(DIAGNOSTIC) */
	if (tb->tb_usecount > 0) {
		mutex_exit(&tprof_startstop_lock);
		return EBUSY;
	}
#if 1 /* XXX for now */
	if (tprof_backend == tb) {
		tprof_backend = NULL;
	}
#endif
	LIST_REMOVE(tb, tb_list);
	mutex_exit(&tprof_startstop_lock);

	kmem_free(tb, sizeof(*tb));

	return 0;
}

/* -------------------- cdevsw interfaces */

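/*
 * tprof_open/tprof_close: only minor 0 exists, and only one LWP may have
 * the device open at a time.  closing the device stops profiling and
 * discards any samples still queued.
 */
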
static int
tprof_open(dev_t dev, int flags, int type, struct lwp *l)
{

	if (minor(dev) != 0) {
		return EXDEV;
	}
	mutex_enter(&tprof_lock);
	if (tprof_owner != NULL) {
		mutex_exit(&tprof_lock);
		return EBUSY;
	}
	tprof_owner = curlwp;
	mutex_exit(&tprof_lock);

	return 0;
}

static int
tprof_close(dev_t dev, int flags, int type, struct lwp *l)
{

	KASSERT(minor(dev) == 0);

	mutex_enter(&tprof_startstop_lock);
	mutex_enter(&tprof_lock);
	tprof_owner = NULL;
	mutex_exit(&tprof_lock);
	tprof_stop();
	tprof_clear();
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

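/*
 * tprof_read: copy queued sample buffers out to userland.
 * tprof_reader_offset remembers how much of the head buffer has already
 * been consumed, so a partially-read buffer is pushed back onto the list.
 * while workers are still running and no data is available, the read
 * blocks (interruptibly).
 */
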
static int
tprof_read(dev_t dev, struct uio *uio, int flags)
{
	tprof_buf_t *buf;
	size_t bytes;
	size_t resid;
	size_t done;
	int error = 0;

	KASSERT(minor(dev) == 0);
	mutex_enter(&tprof_reader_lock);
	while (uio->uio_resid > 0 && error == 0) {
		/*
		 * take the first buffer from the list.
		 */
		mutex_enter(&tprof_lock);
		buf = STAILQ_FIRST(&tprof_list);
		if (buf == NULL) {
			if (tprof_nworker == 0) {
				mutex_exit(&tprof_lock);
				error = 0;
				break;
			}
			mutex_exit(&tprof_reader_lock);
			error = cv_wait_sig(&tprof_reader_cv, &tprof_lock);
			mutex_exit(&tprof_lock);
			mutex_enter(&tprof_reader_lock);
			continue;
		}
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);

		/*
		 * copy it out.
		 */
		bytes = MIN(buf->b_used * sizeof(tprof_sample_t) -
		    tprof_reader_offset, uio->uio_resid);
		resid = uio->uio_resid;
		error = uiomove((char *)buf->b_data + tprof_reader_offset,
		    bytes, uio);
		done = resid - uio->uio_resid;
		tprof_reader_offset += done;

		/*
		 * if we didn't consume the whole buffer,
		 * put it back to the list.
		 */
		if (tprof_reader_offset <
		    buf->b_used * sizeof(tprof_sample_t)) {
			mutex_enter(&tprof_lock);
			STAILQ_INSERT_HEAD(&tprof_list, buf, b_list);
			tprof_nbuf_on_list++;
			cv_broadcast(&tprof_reader_cv);
			mutex_exit(&tprof_lock);
		} else {
			tprof_buf_free(buf);
			tprof_reader_offset = 0;
		}
	}
	mutex_exit(&tprof_reader_lock);

	return error;
}

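/*
 * tprof_ioctl: control interface.
 *	TPROF_IOC_GETVERSION	return TPROF_VERSION.
 *	TPROF_IOC_START		start profiling with the given tprof_param.
 *	TPROF_IOC_STOP		stop profiling.
 *	TPROF_IOC_GETSTAT	copy out the current tprof_stat counters.
 *
 * a userland consumer (eg. tprof(8)) is expected to open the device,
 * issue TPROF_IOC_START, read(2) samples until done, then issue
 * TPROF_IOC_STOP.
 */
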
static int
tprof_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	const struct tprof_param *param;
	int error = 0;

	KASSERT(minor(dev) == 0);

	switch (cmd) {
	case TPROF_IOC_GETVERSION:
		*(int *)data = TPROF_VERSION;
		break;
	case TPROF_IOC_START:
		param = data;
		mutex_enter(&tprof_startstop_lock);
		error = tprof_start(param);
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_STOP:
		mutex_enter(&tprof_startstop_lock);
		tprof_stop();
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_GETSTAT:
		mutex_enter(&tprof_lock);
		memcpy(data, &tprof_stat, sizeof(tprof_stat));
		mutex_exit(&tprof_lock);
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

const struct cdevsw tprof_cdevsw = {
	DEVSW_MODULE_INIT
	.d_open = tprof_open,
	.d_close = tprof_close,
	.d_read = tprof_read,
	.d_write = nowrite,
	.d_ioctl = tprof_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

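/*
 * tprofattach: pseudo-device attach hook; nothing to do here, as the real
 * initialization is done in tprof_driver_init() via tprof_modcmd().
 */
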
void
tprofattach(int nunits)
{

	/* nothing */
}

MODULE(MODULE_CLASS_DRIVER, tprof, NULL);

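/*
 * tprof_driver_init: one-time initialization of the locks, condvars and
 * per-CPU slots; shared by the built-in and modular configurations.
 */
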
static void
tprof_driver_init(void)
{
	unsigned int i;

	mutex_init(&tprof_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_reader_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_startstop_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&tprof_cv, "tprof");
	cv_init(&tprof_reader_cv, "tprof_rd");
	STAILQ_INIT(&tprof_list);
	for (i = 0; i < __arraycount(tprof_cpus); i++) {
		tprof_cpu_t * const c = &tprof_cpus[i];

		c->c_buf = NULL;
		c->c_cpuid = i;
	}
}

static void
tprof_driver_fini(void)
{

	mutex_destroy(&tprof_lock);
	mutex_destroy(&tprof_reader_lock);
	mutex_destroy(&tprof_startstop_lock);
	cv_destroy(&tprof_cv);
	cv_destroy(&tprof_reader_cv);
}

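/*
 * tprof_modcmd: module control.  when built as a module, attach/detach the
 * character device; the driver state is set up and torn down either way.
 */
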
static int
tprof_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		tprof_driver_init();
#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = NODEVMAJOR;
			int error;

			error = devsw_attach("tprof", NULL, &bmajor,
			    &tprof_cdevsw, &cmajor);
			if (error) {
				tprof_driver_fini();
				return error;
			}
		}
#endif /* defined(_MODULE) */
		return 0;

	case MODULE_CMD_FINI:
#if defined(_MODULE)
		{
			int error;

			error = devsw_detach(NULL, &tprof_cdevsw);
			if (error) {
				return error;
			}
		}
#endif /* defined(_MODULE) */
		tprof_driver_fini();
		return 0;

	default:
		return ENOTTY;
	}
}