/*	$NetBSD: subr_evcnt.c,v 1.6.4.1 2011/03/05 20:55:18 rmind Exp $	*/

/*
 * Copyright (c) 1996, 2000 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *          This product includes software developed for the
 *          NetBSD Project.  See http://www.NetBSD.org/ for
 *          information about NetBSD.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratories.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp  (LBL)
 *
 *	@(#)subr_autoconf.c	8.3 (Berkeley) 5/17/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_evcnt.c,v 1.6.4.1 2011/03/05 20:55:18 rmind Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

/* list of all events */
struct evcntlist allevents = TAILQ_HEAD_INITIALIZER(allevents);
static kmutex_t evcnt_lock __cacheline_aligned;
static bool init_done;
static uint32_t evcnt_generation;

/*
 * We need a dummy object to stuff into the evcnt link set to
 * ensure that there always is at least one object in the set.
 */
static struct evcnt dummy_static_evcnt;
__link_set_add_bss(evcnts, dummy_static_evcnt);
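/*
 * Illustrative sketch (not part of this file): a statically-initialized
 * counter is normally defined with EVCNT_INITIALIZER() and entered into
 * the "evcnts" link set with EVCNT_ATTACH_STATIC(), both from
 * <sys/evcnt.h>; evcnt_init() below then attaches it at boot.  The group
 * and name strings ("example", "intr") are made up for the sketch, and
 * the counter is bumped by incrementing ev_count directly:
 *
 *	static struct evcnt example_intr_ev =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_INTR, NULL, "example", "intr");
 *	EVCNT_ATTACH_STATIC(example_intr_ev);
 *
 *	example_intr_ev.ev_count++;
 */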

/*
 * Initialize event counters.  This does the attach procedure for
 * each of the static event counters in the "evcnts" link set.
 */
void
evcnt_init(void)
{
	__link_set_decl(evcnts, struct evcnt);
	struct evcnt * const *evp;

	KASSERT(!init_done);

	mutex_init(&evcnt_lock, MUTEX_DEFAULT, IPL_NONE);

	init_done = true;

	__link_set_foreach(evp, evcnts) {
		if (*evp == &dummy_static_evcnt)
			continue;
		evcnt_attach_static(*evp);
	}
}

/*
 * Attach a statically-initialized event.  The type and string pointers
 * are already set up.
 */
void
evcnt_attach_static(struct evcnt *ev)
{
	int len;

	KASSERTMSG(init_done,
	    ("%s: evcnt not initialized: group=<%s> name=<%s>",
	    __func__, ev->ev_group, ev->ev_name));

	len = strlen(ev->ev_group);
#ifdef DIAGNOSTIC
	if (len >= EVCNT_STRING_MAX)	/* ..._MAX includes NUL */
		panic("evcnt_attach_static: group length (%s)", ev->ev_group);
#endif
	ev->ev_grouplen = len;

	len = strlen(ev->ev_name);
#ifdef DIAGNOSTIC
	if (len >= EVCNT_STRING_MAX)	/* ..._MAX includes NUL */
		panic("evcnt_attach_static: name length (%s)", ev->ev_name);
#endif
	ev->ev_namelen = len;

	mutex_enter(&evcnt_lock);
	TAILQ_INSERT_TAIL(&allevents, ev, ev_list);
	mutex_exit(&evcnt_lock);
}
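
/*
 * Illustrative sketch (not part of this file): code that runs after
 * evcnt_init() can also call evcnt_attach_static() directly, instead of
 * going through the link set, provided the type and string pointers were
 * filled in at compile time.  The names used here are hypothetical.
 *
 *	static struct evcnt example_misc_ev =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "example", "wakeups");
 *
 *	void
 *	example_init(void)
 *	{
 *		evcnt_attach_static(&example_misc_ev);
 *	}
 */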

/*
 * Attach a dynamically-initialized event without zeroing it first.
 * Set up the type and string pointers and then act like it was
 * statically initialized.
 */
void
evcnt_attach_dynamic_nozero(struct evcnt *ev, int type,
    const struct evcnt *parent, const char *group, const char *name)
{

	ev->ev_type = type;
	ev->ev_parent = parent;
	ev->ev_group = group;
	ev->ev_name = name;
	evcnt_attach_static(ev);
}

/*
 * Attach a dynamically-initialized event.  Zero it, set up the type
 * and string pointers and then act like it was statically initialized.
 */
void
evcnt_attach_dynamic(struct evcnt *ev, int type, const struct evcnt *parent,
    const char *group, const char *name)
{

	memset(ev, 0, sizeof *ev);
	evcnt_attach_dynamic_nozero(ev, type, parent, group, name);
}
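
/*
 * Illustrative sketch (not part of this file): a driver typically embeds
 * a struct evcnt in its softc and attaches it with a runtime group name,
 * usually the device instance name, then detaches it again before the
 * softc goes away.  The softc layout and the "rx intr" counter name are
 * hypothetical; device_xname() comes from <sys/device.h>.
 *
 *	struct example_softc {
 *		device_t	sc_dev;
 *		struct evcnt	sc_ev_rxintr;
 *	};
 *
 *	At attach time:
 *		evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
 *		    NULL, device_xname(sc->sc_dev), "rx intr");
 *
 *	In the interrupt handler:
 *		sc->sc_ev_rxintr.ev_count++;
 *
 *	At detach time:
 *		evcnt_detach(&sc->sc_ev_rxintr);
 */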

/*
 * Detach an event.
 */
void
evcnt_detach(struct evcnt *ev)
{

	mutex_enter(&evcnt_lock);
	TAILQ_REMOVE(&allevents, ev, ev_list);
	evcnt_generation++;
	mutex_exit(&evcnt_lock);
}

struct xevcnt_sysctl {
	struct evcnt_sysctl evs;
	char ev_strings[2*EVCNT_STRING_MAX];
};

static size_t
sysctl_fillevcnt(const struct evcnt *ev, struct xevcnt_sysctl *xevs,
    size_t *copylenp)
{
	const size_t copylen = offsetof(struct evcnt_sysctl, ev_strings)
	    + ev->ev_grouplen + 1 + ev->ev_namelen + 1;
	const size_t len = roundup2(copylen, sizeof(uint64_t));
	if (xevs != NULL) {
		xevs->evs.ev_count = ev->ev_count;
		xevs->evs.ev_addr = PTRTOUINT64(ev);
		xevs->evs.ev_parent = PTRTOUINT64(ev->ev_parent);
		xevs->evs.ev_type = ev->ev_type;
		xevs->evs.ev_grouplen = ev->ev_grouplen;
		xevs->evs.ev_namelen = ev->ev_namelen;
		xevs->evs.ev_len = len / sizeof(uint64_t);
		strcpy(xevs->evs.ev_strings, ev->ev_group);
		strcpy(xevs->evs.ev_strings + ev->ev_grouplen + 1, ev->ev_name);
	}

	*copylenp = copylen;
	return len;
}
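
/*
 * Worked example of the layout computed above (the offsetof() value is a
 * made-up figure; the real one depends on the ABI): for group "cpu0"
 * (ev_grouplen 4) and name "softclock" (ev_namelen 9), copylen is
 * offsetof(struct evcnt_sysctl, ev_strings) + 4 + 1 + 9 + 1.  If that
 * offset were 48, copylen would be 63 and len would round up to 64, so
 * ev_len would be 8 units of uint64_t.  Userland steps from one exported
 * record to the next with ev_len * sizeof(uint64_t).
 */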

static int
sysctl_doevcnt(SYSCTLFN_ARGS)
{
	struct xevcnt_sysctl *xevs0 = NULL, *xevs;
	const struct evcnt *ev;
	int error;
	int retries;
	size_t needed, len;
	char *dp;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 2)
		return (EINVAL);

	/*
	 * We can filter on the type of evcnt.
	 */
	const int filter = name[0];
	if (filter != EVCNT_TYPE_ANY
	    && filter != EVCNT_TYPE_MISC
	    && filter != EVCNT_TYPE_INTR
	    && filter != EVCNT_TYPE_TRAP)
		return (EINVAL);

	const u_int count = name[1];
	if (count != KERN_EVCNT_COUNT_ANY
	    && count != KERN_EVCNT_COUNT_NONZERO)
		return (EINVAL);

	sysctl_unlock();

	if (oldp != NULL && xevs0 == NULL)
		xevs0 = kmem_alloc(sizeof(*xevs0), KM_SLEEP);

	retries = 100;
retry:
	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	xevs = xevs0;
	error = 0;
	needed = 0;

	mutex_enter(&evcnt_lock);
	TAILQ_FOREACH(ev, &allevents, ev_list) {
		if (filter != EVCNT_TYPE_ANY && filter != ev->ev_type)
			continue;
		if (count == KERN_EVCNT_COUNT_NONZERO && ev->ev_count == 0)
			continue;

		/*
		 * Prepare to copy.  If xevs is NULL, sysctl_fillevcnt()
		 * just reports how big the item is.
		 */
		size_t copylen;
		const size_t elem_size = sysctl_fillevcnt(ev, xevs, &copylen);
		needed += elem_size;

		if (len < elem_size) {
			xevs = NULL;
			continue;
		}

		KASSERT(xevs != NULL);
		KASSERT(xevs->evs.ev_grouplen != 0);
		KASSERT(xevs->evs.ev_namelen != 0);
		KASSERT(xevs->evs.ev_strings[0] != 0);

		const uint32_t last_generation = evcnt_generation;
		mutex_exit(&evcnt_lock);

		/*
		 * Only copy the actual number of bytes, not the rounded
		 * number.  If we did the latter we'd have to zero them
		 * first or we'd leak random kernel memory.
		 */
		error = copyout(xevs, dp, copylen);

		mutex_enter(&evcnt_lock);
		if (error)
			break;

		if (__predict_false(last_generation != evcnt_generation)) {
			/*
			 * This sysctl node is only for statistics.
			 * Retry; if the queue keeps changing, then
			 * bail out.
			 */
			if (--retries == 0) {
				error = EAGAIN;
				break;
			}
			mutex_exit(&evcnt_lock);
			goto retry;
		}

		/*
		 * This record has been copied out successfully, so it is
		 * now safe to advance the output pointer and shrink the
		 * remaining length.
		 */
		dp += elem_size;
		len -= elem_size;
	}
	mutex_exit(&evcnt_lock);

	if (xevs0 != NULL)
		kmem_free(xevs0, sizeof(*xevs0));

	sysctl_relock();

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += 1024;

	return (error);
}
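
/*
 * Illustrative userland sketch (not part of this file) of how the records
 * produced above can be fetched and walked.  Error handling and the race
 * where the event set grows between the two calls are ignored; the sketch
 * assumes <sys/sysctl.h>, <sys/evcnt.h>, <inttypes.h>, <stdio.h> and
 * <stdlib.h>.  The MIB is { CTL_KERN, KERN_EVCNT, <filter>, <count> },
 * matching the namelen == 2 check in sysctl_doevcnt() (the first two
 * components are consumed by the sysctl tree).
 *
 *	int mib[4] = { CTL_KERN, KERN_EVCNT, EVCNT_TYPE_ANY,
 *	    KERN_EVCNT_COUNT_ANY };
 *	size_t len = 0;
 *
 *	sysctl(mib, 4, NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	sysctl(mib, 4, buf, &len, NULL, 0);
 *
 *	for (const char *p = buf; p < (const char *)buf + len; ) {
 *		const struct evcnt_sysctl *evs = (const void *)p;
 *		const char *group = evs->ev_strings;
 *		const char *name = evs->ev_strings + evs->ev_grouplen + 1;
 *		printf("%s %s %" PRIu64 "\n", group, name, evs->ev_count);
 *		p += evs->ev_len * sizeof(uint64_t);
 *	}
 */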

SYSCTL_SETUP(sysctl_evcnt_setup, "sysctl kern.evcnt subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRUCT, "evcnt",
		SYSCTL_DESCR("Kernel evcnt information"),
		sysctl_doevcnt, 0, NULL, 0,
		CTL_KERN, KERN_EVCNT, CTL_EOL);
}