/*	$NetBSD: subr_kcov.c,v 1.7 2019/04/07 21:01:43 kamil Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Siddharth Muralee.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <sys/kcov.h>

#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

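/*
 * Encoding of the "type" word recorded for comparison traces (see
 * trace_cmp() below): KCOV_CMP_SIZE(x) stores log2 of the operand size in
 * bytes in the bits above bit 0, and KCOV_CMP_CONST (bit 0) is OR'ed in
 * when one of the operands is a compile-time constant.
 */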
#define KCOV_CMP_CONST		1
#define KCOV_CMP_SIZE(x)	((x) << 1)

static dev_type_open(kcov_open);

const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};

/*
 * The KCOV descriptors (KD) are allocated during open(), and are associated
 * with a file descriptor.
 *
 * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
 * the KD, and no LWP can 'disable' this KD except the owner.
 *
 * A KD is freed when its file descriptor is closed _iff_ the KD is not active
 * on an LWP. If it is, we ask the LWP to free it when it exits.
 *
 * The mmapped buffers are backed by a dedicated uobj, so there is no risk of
 * the kernel freeing a buffer that is still mapped in a process: the uobj
 * reference count stays non-zero, and the backing is not released until the
 * process unmaps it.
 */
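
/*
 * Illustrative userland usage (a rough sketch, not part of this driver; the
 * device path, headers and error handling are assumptions, but the ioctl
 * commands, argument types and buffer layout match the handlers below):
 *
 *	#include <sys/kcov.h>		// KCOV_IOC_*, KCOV_MODE_*, kcov_int_t
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *
 *	uint64_t nent = 1 << 16;	// number of kcov_int_t entries
 *	int mode = KCOV_MODE_TRACE_PC;
 *	int fd = open("/dev/kcov", O_RDWR);
 *	kcov_int_t *buf;
 *
 *	ioctl(fd, KCOV_IOC_SETBUFSIZE, &nent);
 *	buf = mmap(NULL, nent * KCOV_ENTRY_SIZE, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_IOC_ENABLE, &mode);
 *	// ... exercise the kernel; buf[0] counts the entries recorded ...
 *	ioctl(fd, KCOV_IOC_DISABLE, NULL);
 */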

typedef struct kcov_desc {
	kmutex_t lock;
	kcov_int_t *buf;
	struct uvm_object *uobj;
	size_t bufnent;
	size_t bufsize;
	int mode;
	bool enabled;
	bool lwpfree;
} kcov_t;

static specificdata_key_t kcov_lwp_key;

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}

static void
kcov_lwp_free(void *arg)
{
	kcov_t *kd = (kcov_t *)arg;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	if (kd->lwpfree) {
		kcov_free(kd);
	}
}

static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;
	int error;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return EINVAL;
	if (kd->buf != NULL)
		return EEXIST;

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->bufnent = nent - 1;
	kd->bufsize = size;
	kd->uobj = uao_create(kd->bufsize, 0);

	/* Map the uobj into the kernel address space, as wired. */
	kd->buf = NULL;
	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(kd->uobj);
		return error;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
	    (vaddr_t)kd->buf + size, false, 0);
	if (error) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
		return error;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
}

static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->enabled) {
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}

static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	int error = 0;
	int mode;
	kcov_t *kd;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		if (kd->enabled) {
			error = EBUSY;
			break;
		}
		error = kcov_allocbuf(kd, *((uint64_t *)addr));
		break;
	case KCOV_IOC_ENABLE:
		if (kd->enabled) {
			error = EBUSY;
			break;
		}
		if (lwp_getspecific(kcov_lwp_key) != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->buf == NULL) {
			error = ENOBUFS;
			break;
		}

		mode = *((int *)addr);
		switch (mode) {
		case KCOV_MODE_NONE:
		case KCOV_MODE_TRACE_PC:
		case KCOV_MODE_TRACE_CMP:
			kd->mode = mode;
			break;
		default:
			error = EINVAL;
		}
		if (error)
			break;

		lwp_setspecific(kcov_lwp_key, kd);
		kd->enabled = true;
		break;
	case KCOV_IOC_DISABLE:
		if (!kd->enabled) {
			error = ENOENT;
			break;
		}
		if (lwp_getspecific(kcov_lwp_key) != kd) {
			error = ENOENT;
			break;
		}
		lwp_setspecific(kcov_lwp_key, NULL);
		kd->enabled = false;
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}

static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd;
	int error = 0;

	if (prot & PROT_EXEC)
		return EACCES;
	if (off < 0)
		return EINVAL;
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	if ((size + off) > kd->bufsize) {
		error = ENOMEM;
		goto out;
	}

	uao_reference(kd->uobj);

	*uobjp = kd->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}

static inline bool
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}

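/*
 * PC tracing hook, called from compiler-inserted coverage instrumentation.
 * With KCOV_MODE_TRACE_PC, buf[0] holds the number of entries recorded so
 * far and each following slot receives the return address of the
 * instrumented caller, up to bufnent entries.
 */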
void __sanitizer_cov_trace_pc(void);

void
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = lwp_getspecific(kcov_lwp_key);
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_PC) {
		/* PC tracing mode not enabled */
		return;
	}

	idx = KCOV_LOAD(kd->buf[0]);
	if (idx < kd->bufnent) {
		KCOV_STORE(kd->buf[idx+1],
		    (intptr_t)__builtin_return_address(0));
		KCOV_STORE(kd->buf[0], idx + 1);
	}
}

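/*
 * Common helper for the comparison hooks below.  With KCOV_MODE_TRACE_CMP,
 * buf[0] counts records and each record occupies four consecutive entries:
 * the comparison type, both operands, and the caller's PC.
 */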
static void
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, intptr_t pc)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = lwp_getspecific(kcov_lwp_key);
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_CMP) {
		/* CMP tracing mode not enabled */
		return;
	}

	idx = KCOV_LOAD(kd->buf[0]);
	if ((idx * 4 + 4) <= kd->bufnent) {
		KCOV_STORE(kd->buf[idx * 4 + 1], type);
		KCOV_STORE(kd->buf[idx * 4 + 2], arg1);
		KCOV_STORE(kd->buf[idx * 4 + 3], arg2);
		KCOV_STORE(kd->buf[idx * 4 + 4], pc);
		KCOV_STORE(kd->buf[0], idx + 1);
	}
}

void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);

void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2);

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2);

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2);

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2);

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

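/*
 * Switch statements are reported as a series of constant comparisons
 * against the switched value: cases[0] is the number of case values,
 * cases[1] their width in bits, and cases[2] onwards the values themselves.
 */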
void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases);

void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	intptr_t pc;

	pc = (intptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}

/* -------------------------------------------------------------------------- */

MODULE(MODULE_CLASS_MISC, kcov, NULL);

static void
kcov_init(void)
{

	lwp_specific_key_create(&kcov_lwp_key, kcov_lwp_free);
}

static int
kcov_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		kcov_init();
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}