/*	$NetBSD: subr_kcov.c,v 1.2 2019/02/23 12:03:07 kamil Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Siddharth Muralee.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <sys/kcov.h>

#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

/*
 * The KCOV descriptors are allocated during open(), and are associated with
 * the calling proc. They are freed lazily when their refcount reaches zero,
 * only when the process exits; this guarantees that kd->buf is not mmapped
 * in a currently running LWP. A KCOV descriptor is active on only one LWP
 * at the same time within the proc.
 *
 * In the refcount, one ref is for the proc, and one ref is for the LWP where
 * the descriptor is active. In each case, the descriptor is pointed to in
 * the proc's and LWP's specificdata.
 */
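
/*
 * Illustrative userland usage (a rough sketch, not part of this file's
 * build).  The ioctl commands, kcov_int_t and KCOV_ENTRY_SIZE come from
 * <sys/kcov.h>; the /dev/kcov device path and the KCOV_NENT entry count
 * below are assumptions made for this example, and the usual userland
 * headers (fcntl.h, sys/ioctl.h, sys/mman.h) are omitted:
 *
 *	uint64_t nent = KCOV_NENT;
 *	int fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KCOV_IOC_SETBUFSIZE, &nent);
 *	kcov_int_t *cover = (kcov_int_t *)mmap(NULL, nent * KCOV_ENTRY_SIZE,
 *	    PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_IOC_ENABLE);	// trace the calling LWP
 *	cover[0] = 0;			// entry 0 is the record counter
 *	... issue the syscalls to be traced ...
 *	ioctl(fd, KCOV_IOC_DISABLE);	// cover[1..cover[0]] hold the PCs
 */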

typedef struct kcov_desc {
	kmutex_t lock;
	int refcnt;
	kcov_int_t *buf;
	size_t bufnent;
	size_t bufsize;
	TAILQ_ENTRY(kcov_desc) entry;
} kcov_t;

static specificdata_key_t kcov_proc_key;
static specificdata_key_t kcov_lwp_key;

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
	KASSERT(kd->refcnt > 0);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

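/*
 * Make the descriptor active on the calling LWP: take the per-LWP reference
 * and hang the descriptor off the LWP's specificdata.  Called with kd locked.
 */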
static void
kcov_lwp_take(kcov_t *kd)
{

	kd->refcnt++;
	KASSERT(kd->refcnt == 2);
	lwp_setspecific(kcov_lwp_key, kd);
}

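/*
 * Deactivate the descriptor on the calling LWP: drop the per-LWP reference
 * and clear the LWP's specificdata pointer.  Called with kd locked.
 */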
static void
kcov_lwp_release(kcov_t *kd)
{

	KASSERT(kd->refcnt == 2);
	kd->refcnt--;
	lwp_setspecific(kcov_lwp_key, NULL);
}

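/* True if the descriptor is currently active on an LWP. */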
static inline bool
kcov_is_owned(kcov_t *kd)
{

	return (kd->refcnt > 1);
}

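/*
 * Specificdata destructor, installed for both the proc key and the LWP key.
 * Drops one reference; the descriptor and its buffer are freed only when the
 * last reference goes away.
 */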
static void
kcov_free(void *arg)
{
	kcov_t *kd = (kcov_t *)arg;
	bool dofree;

	if (kd == NULL) {
		return;
	}

	kcov_lock(kd);
	kd->refcnt--;
	/* Read the refcount while still holding the lock, to avoid racing
	 * with a concurrent release of the other reference. */
	dofree = (kd->refcnt == 0);
	kcov_unlock(kd);

	if (!dofree) {
		return;
	}
	if (kd->buf != NULL) {
		uvm_km_free(kernel_map, (vaddr_t)kd->buf, kd->bufsize,
		    UVM_KMF_WIRED);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}

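/*
 * Allocate the coverage buffer: nent entries of KCOV_ENTRY_SIZE bytes, wired
 * and zeroed.  Entry 0 is reserved for the record counter, so only nent - 1
 * entries are available for coverage records.
 */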
static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return EINVAL;
	if (kd->buf != NULL)
		return EEXIST;

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->buf = (kcov_int_t *)uvm_km_alloc(kernel_map, size, 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (kd->buf == NULL)
		return ENOMEM;

	kd->bufnent = nent - 1;
	kd->bufsize = size;

	return 0;
}

/* -------------------------------------------------------------------------- */

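/*
 * Attach a fresh descriptor to the calling proc.  Only one descriptor per
 * proc: opening the device a second time fails with EBUSY.
 */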
static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct proc *p = l->l_proc;
	kcov_t *kd;

	kd = proc_getspecific(p, kcov_proc_key);
	if (kd != NULL)
		return EBUSY;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
	kd->refcnt = 1;
	proc_setspecific(p, kcov_proc_key, kd);

	return 0;
}

static int
kcov_close(dev_t dev, int flag, int mode, struct lwp *l)
{

	return 0;
}

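/*
 * Control operations: set the buffer size, and enable or disable tracing on
 * the calling LWP.  While the descriptor is active on an LWP, it cannot be
 * resized or enabled again.
 */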
static int
kcov_ioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct proc *p = l->l_proc;
	int error = 0;
	kcov_t *kd;

	kd = proc_getspecific(p, kcov_proc_key);
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		if (kcov_is_owned(kd)) {
			error = EBUSY;
			break;
		}
		error = kcov_allocbuf(kd, *((uint64_t *)addr));
		break;
	case KCOV_IOC_ENABLE:
		if (kcov_is_owned(kd)) {
			error = EBUSY;
			break;
		}
		if (kd->buf == NULL) {
			error = ENOBUFS;
			break;
		}
		KASSERT(l == curlwp);
		kcov_lwp_take(kd);
		break;
	case KCOV_IOC_DISABLE:
		if (lwp_getspecific(kcov_lwp_key) == NULL) {
			error = ENOENT;
			break;
		}
		KASSERT(l == curlwp);
		kcov_lwp_release(kd);
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}

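/*
 * Map the coverage buffer into userland, one page at a time.  The offset
 * must be page-aligned and within the buffer.
 */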
static paddr_t
kcov_mmap(dev_t dev, off_t offset, int prot)
{
	kcov_t *kd;
	paddr_t pa;
	vaddr_t va;

	kd = proc_getspecific(curproc, kcov_proc_key);
	KASSERT(kd != NULL);

	if ((offset < 0) || (offset >= kd->bufnent * KCOV_ENTRY_SIZE)) {
		return (paddr_t)-1;
	}
	if (offset & PAGE_MASK) {
		return (paddr_t)-1;
	}
	va = (vaddr_t)kd->buf + offset;
	if (!pmap_extract(pmap_kernel(), va, &pa)) {
		return (paddr_t)-1;
	}

	return atop(pa);
}

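/* True if the current CPU is running in interrupt context. */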
static inline bool
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}

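/*
 * Coverage hook inserted by the compiler into instrumented kernel code.
 * If the current LWP is traced, append the caller's return address to the
 * buffer; entry 0 of the buffer holds the number of records.
 */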
void __sanitizer_cov_trace_pc(void);

void
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = lwp_getspecific(kcov_lwp_key);
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] = (intptr_t)__builtin_return_address(0);
		kd->buf[0]++;
	}
}

/* -------------------------------------------------------------------------- */

const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = kcov_close,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = kcov_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = kcov_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

MODULE(MODULE_CLASS_ANY, kcov, NULL);

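/* Create the proc and LWP specificdata keys, with kcov_free as destructor. */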
static void
kcov_init(void)
{

	proc_specific_key_create(&kcov_proc_key, kcov_free);
	lwp_specific_key_create(&kcov_lwp_key, kcov_free);
}

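/* Module control: the kcov module, once loaded, cannot be unloaded. */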
static int
kcov_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		kcov_init();
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}