/*	$NetBSD: tprof.c,v 1.9 2011/02/25 22:35:38 yamt Exp $	*/

/*-
 * Copyright (c)2008,2009,2010 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof.c,v 1.9 2011/02/25 22:35:38 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/cpu.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/queue.h>

#include <dev/tprof/tprof.h>
#include <dev/tprof/tprof_ioctl.h>

/*
 * locking order:
 *	tprof_reader_lock -> tprof_lock
 *	tprof_startstop_lock -> tprof_lock
 */

/*
 * protected by:
 *	L: tprof_lock
 *	R: tprof_reader_lock
 *	S: tprof_startstop_lock
 *	s: writer should hold tprof_startstop_lock and tprof_lock
 *	   reader should hold tprof_startstop_lock or tprof_lock
 */
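
/*
 * for example (an illustrative sketch, not called anywhere), a path
 * which needs both the reader lock and tprof_lock takes them in the
 * order above, as tprof_read() and tprof_clear() below do:
 *
 *	mutex_enter(&tprof_reader_lock);
 *	mutex_enter(&tprof_lock);
 *	...
 *	mutex_exit(&tprof_lock);
 *	mutex_exit(&tprof_reader_lock);
 */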

typedef struct tprof_buf {
	u_int b_used;
	u_int b_size;
	u_int b_overflow;
	u_int b_unused;
	STAILQ_ENTRY(tprof_buf) b_list;
	tprof_sample_t b_data[];
} tprof_buf_t;
#define	TPROF_BUF_BYTESIZE(sz) \
	(sizeof(tprof_buf_t) + (sz) * sizeof(tprof_sample_t))
#define	TPROF_MAX_SAMPLES_PER_BUF	10000

#define	TPROF_MAX_BUF			100
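
/*
 * a worked example of the sizing above (the 100 Hz figure is
 * illustrative; the real value depends on the backend's estimated
 * sample frequency): with an estimated frequency of 100 Hz,
 * tprof_start() below picks
 * tprof_samples_per_buf = MIN(100 * 2, TPROF_MAX_SAMPLES_PER_BUF) = 200,
 * so each buffer allocation is TPROF_BUF_BYTESIZE(200) =
 * sizeof(tprof_buf_t) + 200 * sizeof(tprof_sample_t) bytes, with the
 * samples themselves stored in the b_data[] flexible array member.
 */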

typedef struct {
	tprof_buf_t *c_buf;
	struct work c_work;
	callout_t c_callout;
} __aligned(CACHE_LINE_SIZE) tprof_cpu_t;

typedef struct tprof_backend {
	const char *tb_name;
	const tprof_backend_ops_t *tb_ops;
	LIST_ENTRY(tprof_backend) tb_list;
	int tb_usecount;	/* S: */
} tprof_backend_t;

static kmutex_t tprof_lock;
static bool tprof_running;		/* s: */
static u_int tprof_nworker;		/* L: # of running worker LWPs */
static lwp_t *tprof_owner;
static STAILQ_HEAD(, tprof_buf) tprof_list;	/* L: global buffer list */
static u_int tprof_nbuf_on_list;	/* L: # of buffers on tprof_list */
static struct workqueue *tprof_wq;
static tprof_cpu_t tprof_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
static u_int tprof_samples_per_buf;

static tprof_backend_t *tprof_backend;	/* S: */
static LIST_HEAD(, tprof_backend) tprof_backends =
    LIST_HEAD_INITIALIZER(tprof_backend);	/* S: */

static kmutex_t tprof_reader_lock;
static kcondvar_t tprof_reader_cv;	/* L: */
static off_t tprof_reader_offset;	/* R: */

static kmutex_t tprof_startstop_lock;
static kcondvar_t tprof_cv;		/* L: */

static struct tprof_stat tprof_stat;	/* L: */

static tprof_cpu_t *
tprof_cpu(struct cpu_info *ci)
{

	return &tprof_cpus[cpu_index(ci)];
}

static tprof_cpu_t *
tprof_curcpu(void)
{

	return tprof_cpu(curcpu());
}

static tprof_buf_t *
tprof_buf_alloc(void)
{
	tprof_buf_t *new;
	u_int size = tprof_samples_per_buf;

	new = kmem_alloc(TPROF_BUF_BYTESIZE(size), KM_SLEEP);
	new->b_used = 0;
	new->b_size = size;
	new->b_overflow = 0;
	return new;
}

static void
tprof_buf_free(tprof_buf_t *buf)
{

	kmem_free(buf, TPROF_BUF_BYTESIZE(buf->b_size));
}

static tprof_buf_t *
tprof_buf_switch(tprof_cpu_t *c, tprof_buf_t *new)
{
	tprof_buf_t *old;

	old = c->c_buf;
	c->c_buf = new;
	return old;
}

static tprof_buf_t *
tprof_buf_refresh(void)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *new;

	new = tprof_buf_alloc();
	return tprof_buf_switch(c, new);
}

static void
tprof_worker(struct work *wk, void *dummy)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t *buf;
	bool shouldstop;

	KASSERT(wk == &c->c_work);
	KASSERT(dummy == NULL);

	/*
	 * get a per cpu buffer.
	 */
	buf = tprof_buf_refresh();

	/*
	 * and put it on the global list for read(2).
	 */
	mutex_enter(&tprof_lock);
	shouldstop = !tprof_running;
	if (shouldstop) {
		KASSERT(tprof_nworker > 0);
		tprof_nworker--;
		cv_broadcast(&tprof_cv);
		cv_broadcast(&tprof_reader_cv);
	}
	if (buf->b_used == 0) {
		tprof_stat.ts_emptybuf++;
	} else if (tprof_nbuf_on_list < TPROF_MAX_BUF) {
		tprof_stat.ts_sample += buf->b_used;
		tprof_stat.ts_overflow += buf->b_overflow;
		tprof_stat.ts_buf++;
		STAILQ_INSERT_TAIL(&tprof_list, buf, b_list);
		tprof_nbuf_on_list++;
		buf = NULL;
		cv_broadcast(&tprof_reader_cv);
	} else {
		tprof_stat.ts_dropbuf_sample += buf->b_used;
		tprof_stat.ts_dropbuf++;
	}
	mutex_exit(&tprof_lock);
	if (buf) {
		tprof_buf_free(buf);
	}
	if (!shouldstop) {
		callout_schedule(&c->c_callout, hz);
	}
}

static void
tprof_kick(void *vp)
{
	struct cpu_info * const ci = vp;
	tprof_cpu_t * const c = tprof_cpu(ci);

	workqueue_enqueue(tprof_wq, &c->c_work, ci);
}

static void
tprof_stop1(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	KASSERT(tprof_nworker == 0);

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *old;

		old = tprof_buf_switch(c, NULL);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_destroy(&c->c_callout);
	}
	workqueue_destroy(tprof_wq);
}

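/*
 * tprof_start: start profiling.
 *
 * the caller must hold tprof_startstop_lock.  fails with EBUSY if
 * profiling is already running or the backend is busy, and with ENOENT
 * if no backend is registered.  otherwise, sizes the per-cpu buffers
 * from the backend's estimated sample frequency, creates the per-cpu
 * worker workqueue, starts the backend, and kicks a worker on each cpu.
 */
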
static int
tprof_start(const struct tprof_param *param)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int error;
	uint64_t freq;
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (tprof_running) {
		error = EBUSY;
		goto done;
	}

	tb = tprof_backend;
	if (tb == NULL) {
		error = ENOENT;
		goto done;
	}
	if (tb->tb_usecount > 0) {
		error = EBUSY;
		goto done;
	}

	tb->tb_usecount++;
	freq = tb->tb_ops->tbo_estimate_freq();
	tprof_samples_per_buf = MIN(freq * 2, TPROF_MAX_SAMPLES_PER_BUF);

	error = workqueue_create(&tprof_wq, "tprofmv", tprof_worker, NULL,
	    PRI_NONE, IPL_SOFTCLOCK, WQ_MPSAFE | WQ_PERCPU);
	if (error != 0) {
		goto done;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);
		tprof_buf_t *new;
		tprof_buf_t *old;

		new = tprof_buf_alloc();
		old = tprof_buf_switch(c, new);
		if (old != NULL) {
			tprof_buf_free(old);
		}
		callout_init(&c->c_callout, CALLOUT_MPSAFE);
		callout_setfunc(&c->c_callout, tprof_kick, ci);
	}

	error = tb->tb_ops->tbo_start(NULL);
	if (error != 0) {
		KASSERT(tb->tb_usecount > 0);
		tb->tb_usecount--;
		tprof_stop1();
		goto done;
	}

	mutex_enter(&tprof_lock);
	tprof_running = true;
	mutex_exit(&tprof_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		tprof_cpu_t * const c = tprof_cpu(ci);

		mutex_enter(&tprof_lock);
		tprof_nworker++;
		mutex_exit(&tprof_lock);
		workqueue_enqueue(tprof_wq, &c->c_work, ci);
	}
done:
	return error;
}
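
/*
 * tprof_stop: stop profiling.
 *
 * the caller must hold tprof_startstop_lock.  stops the backend, then
 * waits until every worker has noticed !tprof_running and exited
 * before tprof_stop1() tears down the callouts and the workqueue.
 */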

static void
tprof_stop(void)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));
	if (!tprof_running) {
		goto done;
	}

	tb = tprof_backend;
	KASSERT(tb->tb_usecount > 0);
	tb->tb_ops->tbo_stop(NULL);
	tb->tb_usecount--;

	mutex_enter(&tprof_lock);
	tprof_running = false;
	cv_broadcast(&tprof_reader_cv);
	while (tprof_nworker > 0) {
		cv_wait(&tprof_cv, &tprof_lock);
	}
	mutex_exit(&tprof_lock);

	tprof_stop1();
done:
	;
}

/*
 * tprof_clear: drain unread samples.
 */

static void
tprof_clear(void)
{
	tprof_buf_t *buf;

	mutex_enter(&tprof_reader_lock);
	mutex_enter(&tprof_lock);
	while ((buf = STAILQ_FIRST(&tprof_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);
		tprof_buf_free(buf);
		mutex_enter(&tprof_lock);
	}
	KASSERT(tprof_nbuf_on_list == 0);
	mutex_exit(&tprof_lock);
	tprof_reader_offset = 0;
	mutex_exit(&tprof_reader_lock);

	memset(&tprof_stat, 0, sizeof(tprof_stat));
}

static tprof_backend_t *
tprof_backend_lookup(const char *name)
{
	tprof_backend_t *tb;

	KASSERT(mutex_owned(&tprof_startstop_lock));

	LIST_FOREACH(tb, &tprof_backends, tb_list) {
		if (!strcmp(tb->tb_name, name)) {
			return tb;
		}
	}
	return NULL;
}

/* -------------------- backend interfaces */

/*
 * tprof_sample: record a sample on the per-cpu buffer.
 *
 * be careful; can be called in NMI context.
 * we are bluntly assuming that curcpu() and curlwp->l_proc->p_pid are safe.
 */

void
tprof_sample(tprof_backend_cookie_t *cookie, const tprof_frame_info_t *tfi)
{
	tprof_cpu_t * const c = tprof_curcpu();
	tprof_buf_t * const buf = c->c_buf;
	tprof_sample_t *sp;
	const uintptr_t pc = tfi->tfi_pc;
	u_int idx;

	idx = buf->b_used;
	if (__predict_false(idx >= buf->b_size)) {
		buf->b_overflow++;
		return;
	}
	sp = &buf->b_data[idx];
	sp->s_pid = curlwp->l_proc->p_pid;
	sp->s_flags = (tfi->tfi_inkernel) ? TPROF_SAMPLE_INKERNEL : 0;
	sp->s_pc = pc;
	buf->b_used = idx + 1;
}
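
/*
 * an illustrative sketch of the caller side (hypothetical backend
 * code, not part of this file; the right-hand-side names are
 * placeholders): a backend's sampling interrupt handler fills a
 * tprof_frame_info_t from the interrupted context and hands it to
 * tprof_sample():
 *
 *	tprof_frame_info_t tfi;
 *
 *	tfi.tfi_pc = pc_of_interrupted_context;
 *	tfi.tfi_inkernel = pc_is_in_kernel_text;
 *	tprof_sample(cookie, &tfi);
 */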

/*
 * tprof_backend_register: register a sampling backend by name.
 *
 * fails with EINVAL on a version mismatch, EEXIST if a backend with
 * the same name is already registered, and (for now) ENOTSUP if any
 * other backend is registered.
 */

int
tprof_backend_register(const char *name, const tprof_backend_ops_t *ops,
    int vers)
{
	tprof_backend_t *tb;

	if (vers != TPROF_BACKEND_VERSION) {
		return EINVAL;
	}

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
	if (tb != NULL) {
		mutex_exit(&tprof_startstop_lock);
		return EEXIST;
	}
#if 1 /* XXX for now */
	if (!LIST_EMPTY(&tprof_backends)) {
		mutex_exit(&tprof_startstop_lock);
		return ENOTSUP;
	}
#endif
	tb = kmem_alloc(sizeof(*tb), KM_SLEEP);
	tb->tb_name = name;
	tb->tb_ops = ops;
	tb->tb_usecount = 0;
	LIST_INSERT_HEAD(&tprof_backends, tb, tb_list);
#if 1 /* XXX for now */
	if (tprof_backend == NULL) {
		tprof_backend = tb;
	}
#endif
	mutex_exit(&tprof_startstop_lock);

	return 0;
}
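
/*
 * an illustrative sketch of a registration (hypothetical backend
 * names; the ops members shown are the ones this file invokes):
 *
 *	static const tprof_backend_ops_t tprof_foo_ops = {
 *		.tbo_estimate_freq = tprof_foo_estimate_freq,
 *		.tbo_start = tprof_foo_start,
 *		.tbo_stop = tprof_foo_stop,
 *	};
 *
 *	error = tprof_backend_register("tprof_foo", &tprof_foo_ops,
 *	    TPROF_BACKEND_VERSION);
 */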

/*
 * tprof_backend_unregister: unregister the named backend.
 *
 * fails with EBUSY if the backend is still in use; panics (under
 * DIAGNOSTIC) if no backend of that name is registered.
 */

int
tprof_backend_unregister(const char *name)
{
	tprof_backend_t *tb;

	mutex_enter(&tprof_startstop_lock);
	tb = tprof_backend_lookup(name);
#if defined(DIAGNOSTIC)
	if (tb == NULL) {
		mutex_exit(&tprof_startstop_lock);
		panic("%s: not found '%s'", __func__, name);
	}
#endif /* defined(DIAGNOSTIC) */
	if (tb->tb_usecount > 0) {
		mutex_exit(&tprof_startstop_lock);
		return EBUSY;
	}
#if 1 /* XXX for now */
	if (tprof_backend == tb) {
		tprof_backend = NULL;
	}
#endif
	LIST_REMOVE(tb, tb_list);
	mutex_exit(&tprof_startstop_lock);

	kmem_free(tb, sizeof(*tb));

	return 0;
}

/* -------------------- cdevsw interfaces */

void tprofattach(int);

static int
tprof_open(dev_t dev, int flags, int type, struct lwp *l)
{

	if (minor(dev) != 0) {
		return EXDEV;
	}
	mutex_enter(&tprof_lock);
	if (tprof_owner != NULL) {
		mutex_exit(&tprof_lock);
		return EBUSY;
	}
	tprof_owner = curlwp;
	mutex_exit(&tprof_lock);

	return 0;
}

static int
tprof_close(dev_t dev, int flags, int type, struct lwp *l)
{

	KASSERT(minor(dev) == 0);

	mutex_enter(&tprof_startstop_lock);
	mutex_enter(&tprof_lock);
	tprof_owner = NULL;
	mutex_exit(&tprof_lock);
	tprof_stop();
	tprof_clear();
	mutex_exit(&tprof_startstop_lock);

	return 0;
}

static int
tprof_read(dev_t dev, struct uio *uio, int flags)
{
	tprof_buf_t *buf;
	size_t bytes;
	size_t resid;
	size_t done;
	int error = 0;

	KASSERT(minor(dev) == 0);
	mutex_enter(&tprof_reader_lock);
	while (uio->uio_resid > 0 && error == 0) {
		/*
		 * take the first buffer from the list.
		 */
		mutex_enter(&tprof_lock);
		buf = STAILQ_FIRST(&tprof_list);
		if (buf == NULL) {
			if (tprof_nworker == 0) {
				mutex_exit(&tprof_lock);
				error = 0;
				break;
			}
			mutex_exit(&tprof_reader_lock);
			error = cv_wait_sig(&tprof_reader_cv, &tprof_lock);
			mutex_exit(&tprof_lock);
			mutex_enter(&tprof_reader_lock);
			continue;
		}
		STAILQ_REMOVE_HEAD(&tprof_list, b_list);
		KASSERT(tprof_nbuf_on_list > 0);
		tprof_nbuf_on_list--;
		mutex_exit(&tprof_lock);

		/*
		 * copy it out.
		 */
		bytes = MIN(buf->b_used * sizeof(tprof_sample_t) -
		    tprof_reader_offset, uio->uio_resid);
		resid = uio->uio_resid;
		error = uiomove((char *)buf->b_data + tprof_reader_offset,
		    bytes, uio);
		done = resid - uio->uio_resid;
		tprof_reader_offset += done;

		/*
		 * if we didn't consume the whole buffer,
		 * put it back on the list.
		 */
		if (tprof_reader_offset <
		    buf->b_used * sizeof(tprof_sample_t)) {
			mutex_enter(&tprof_lock);
			STAILQ_INSERT_HEAD(&tprof_list, buf, b_list);
			tprof_nbuf_on_list++;
			cv_broadcast(&tprof_reader_cv);
			mutex_exit(&tprof_lock);
		} else {
			tprof_buf_free(buf);
			tprof_reader_offset = 0;
		}
	}
	mutex_exit(&tprof_reader_lock);

	return error;
}

static int
tprof_ioctl(dev_t dev, u_long cmd, void *data, int flags, struct lwp *l)
{
	const struct tprof_param *param;
	int error = 0;

	KASSERT(minor(dev) == 0);

	switch (cmd) {
	case TPROF_IOC_GETVERSION:
		*(int *)data = TPROF_VERSION;
		break;
	case TPROF_IOC_START:
		param = data;
		mutex_enter(&tprof_startstop_lock);
		error = tprof_start(param);
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_STOP:
		mutex_enter(&tprof_startstop_lock);
		tprof_stop();
		mutex_exit(&tprof_startstop_lock);
		break;
	case TPROF_IOC_GETSTAT:
		mutex_enter(&tprof_lock);
		memcpy(data, &tprof_stat, sizeof(tprof_stat));
		mutex_exit(&tprof_lock);
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}
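
/*
 * an illustrative userland sketch (assumptions: the device node is
 * named /dev/tprof, and a zeroed tprof_param is acceptable; the real
 * fields of struct tprof_param live in tprof_ioctl.h):
 *
 *	struct tprof_param param;
 *	struct tprof_stat st;
 *	int version, fd;
 *
 *	fd = open("/dev/tprof", O_RDWR);
 *	ioctl(fd, TPROF_IOC_GETVERSION, &version);
 *	memset(&param, 0, sizeof(param));
 *	ioctl(fd, TPROF_IOC_START, &param);
 *	... read(2) tprof_sample_t records from fd ...
 *	ioctl(fd, TPROF_IOC_STOP, NULL);
 *	ioctl(fd, TPROF_IOC_GETSTAT, &st);
 *	close(fd);
 */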

const struct cdevsw tprof_cdevsw = {
	.d_open = tprof_open,
	.d_close = tprof_close,
	.d_read = tprof_read,
	.d_write = nowrite,
	.d_ioctl = tprof_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = D_OTHER | D_MPSAFE,
};

void
tprofattach(int nunits)
{

	/* nothing */
}

MODULE(MODULE_CLASS_DRIVER, tprof, NULL);

static void
tprof_driver_init(void)
{

	mutex_init(&tprof_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_reader_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&tprof_startstop_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&tprof_cv, "tprof");
	cv_init(&tprof_reader_cv, "tprof_rd");
	STAILQ_INIT(&tprof_list);
}

static void
tprof_driver_fini(void)
{

	mutex_destroy(&tprof_lock);
	mutex_destroy(&tprof_reader_lock);
	mutex_destroy(&tprof_startstop_lock);
	cv_destroy(&tprof_cv);
	cv_destroy(&tprof_reader_cv);
}

static int
tprof_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		tprof_driver_init();
#if defined(_MODULE)
		{
			devmajor_t bmajor = NODEVMAJOR;
			devmajor_t cmajor = NODEVMAJOR;
			int error;

			error = devsw_attach("tprof", NULL, &bmajor,
			    &tprof_cdevsw, &cmajor);
			if (error) {
				tprof_driver_fini();
				return error;
			}
		}
#endif /* defined(_MODULE) */
		return 0;

	case MODULE_CMD_FINI:
#if defined(_MODULE)
		{
			int error;
			error = devsw_detach(NULL, &tprof_cdevsw);
			if (error) {
				return error;
			}
		}
#endif /* defined(_MODULE) */
		tprof_driver_fini();
		return 0;

	default:
		return ENOTTY;
	}
}