/*	$NetBSD: subr_kcov.c,v 1.12 2020/04/04 06:51:46 maxv Exp $	*/

/*
 * Copyright (c) 2019-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Siddharth Muralee.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <sys/kcov.h>

#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

#define KCOV_CMP_CONST		1
#define KCOV_CMP_SIZE(x)	((x) << 1)

static dev_type_open(kcov_open);

const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};

/*
 * The KCOV descriptors (KD) are allocated during open(), and are associated
 * with a file descriptor.
 *
 * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
 * the KD, and no LWP other than the owner can 'disable' this KD.
 *
 * A KD is freed when its file descriptor is closed _iff_ the KD is not active
 * on an LWP. If it is, we ask the LWP to free it when it exits.
 *
 * The mmapped buffers live in a dedicated uobj, so there is no risk of the
 * kernel freeing a buffer that is still mmapped in a process: the uobj
 * refcount stays non-zero, and the backing is not freed until the process
 * unmaps it.
 */
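
/*
 * Illustrative sketch of the expected userland usage, derived from the
 * ioctl and mmap handlers below (the /dev/kcov path and the exact open
 * flags are assumptions, and error handling is omitted; see also kcov(4)):
 *
 *	uint64_t nent = 4096;
 *	int mode = KCOV_MODE_TRACE_PC;
 *	kcov_int_t *buf, n;
 *
 *	int fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KCOV_IOC_SETBUFSIZE, &nent);
 *	buf = mmap(NULL, nent * KCOV_ENTRY_SIZE, PROT_READ|PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_IOC_ENABLE, &mode);
 *	buf[0] = 0;
 *	... perform the syscalls to be traced ...
 *	n = buf[0];	(number of PCs recorded in buf[1..n])
 *	ioctl(fd, KCOV_IOC_DISABLE, NULL);
 */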

typedef struct kcov_desc {
	kmutex_t lock;			/* protects the fields below */
	kcov_int_t *buf;		/* kernel mapping of the trace buffer */
	struct uvm_object *uobj;	/* anonymous uobj backing the buffer */
	size_t bufnent;			/* usable entries, excluding slot zero */
	size_t bufsize;			/* buffer size in bytes, page-rounded */
	int mode;			/* KCOV_MODE_*, chosen at enable time */
	bool enabled;			/* currently enabled on an LWP */
	bool lwpfree;			/* fd closed; owner LWP must free */
} kcov_t;

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}
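
/*
 * Called when an LWP exits. Disable the KD attached to the exiting LWP,
 * and free it if its file descriptor was already closed (lwpfree).
 */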
void
kcov_lwp_free(struct lwp *l)
{
	kcov_t *kd = (kcov_t *)l->l_kcov;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	if (kd->lwpfree) {
		kcov_free(kd);
	}
}
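
/*
 * Allocate the trace buffer: an anonymous uobj of 'nent' entries, mapped
 * wired into the kernel address space. Entry zero is reserved for the
 * write index, hence bufnent = nent - 1.
 */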
static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;
	int error;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return EINVAL;
	if (kd->buf != NULL)
		return EEXIST;

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->bufnent = nent - 1;
	kd->bufsize = size;
	kd->uobj = uao_create(kd->bufsize, 0);

	/* Map the uobj into the kernel address space, as wired. */
	kd->buf = NULL;
	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(kd->uobj);
		return error;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
	    (vaddr_t)kd->buf + size, false, 0);
	if (error) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
		return error;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */
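
/*
 * open(2) on the kcov device: allocate a fresh descriptor and attach it to
 * a cloned file descriptor, so that each open() gets private kcov state.
 */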
static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
}
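
/*
 * close(2): free the descriptor now if it is not enabled on an LWP;
 * otherwise flag it so that the owner LWP frees it on exit.
 */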
static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->enabled) {
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}
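
/*
 * ioctl(2) on a kcov descriptor:
 *
 *	KCOV_IOC_SETBUFSIZE	allocate the trace buffer (entries, uint64_t)
 *	KCOV_IOC_ENABLE		start tracing the calling LWP (KCOV_MODE_*)
 *	KCOV_IOC_DISABLE	stop tracing; only the owner LWP may do this
 */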
static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	struct lwp *l = curlwp;
	int error = 0;
	int mode;
	kcov_t *kd;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		if (kd->enabled) {
			error = EBUSY;
			break;
		}
		error = kcov_allocbuf(kd, *((uint64_t *)addr));
		break;
	case KCOV_IOC_ENABLE:
		if (kd->enabled) {
			error = EBUSY;
			break;
		}
		if (l->l_kcov != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->buf == NULL) {
			error = ENOBUFS;
			break;
		}

		mode = *((int *)addr);
		switch (mode) {
		case KCOV_MODE_NONE:
		case KCOV_MODE_TRACE_PC:
		case KCOV_MODE_TRACE_CMP:
			kd->mode = mode;
			break;
		default:
			error = EINVAL;
		}
		if (error)
			break;

		l->l_kcov = kd;
		kd->enabled = true;
		break;
	case KCOV_IOC_DISABLE:
		if (!kd->enabled) {
			error = ENOENT;
			break;
		}
		if (l->l_kcov != kd) {
			error = ENOENT;
			break;
		}
		l->l_kcov = NULL;
		kd->enabled = false;
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}
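
/*
 * mmap(2): hand out a reference to the buffer's uobj so that userland can
 * map the trace buffer read/write (never executable) and share it with the
 * kernel-side instrumentation.
 */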
static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd;
	int error = 0;

	if (prot & PROT_EXEC)
		return EACCES;
	if (off < 0)
		return EINVAL;
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	if ((size + off) > kd->bufsize) {
		error = ENOMEM;
		goto out;
	}

	uao_reference(kd->uobj);

	*uobjp = kd->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}

/* -------------------------------------------------------------------------- */

/*
 * Constraints on the functions here: they must be marked with __nomsan, and
 * must not make any external calls.
 */

static inline bool __nomsan
in_interrupt(void)
{
	/* ci_idepth is negative when the CPU is not handling an interrupt. */
	return curcpu()->ci_idepth >= 0;
}
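
/*
 * Called by the compiler instrumentation (-fsanitize-coverage=trace-pc) at
 * each edge of the control flow. Record the return address in the buffer of
 * the traced LWP: buf[0] holds the number of entries recorded so far,
 * buf[1..bufnent] the program counters.
 */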
void __sanitizer_cov_trace_pc(void);

void __nomsan
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_PC) {
		/* PC tracing mode not enabled. */
		return;
	}

	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] =
		    (intptr_t)__builtin_return_address(0);
		kd->buf[0] = idx + 1;
	}
}
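
/*
 * Common helper for the comparison hooks. Each record occupies four
 * consecutive entries: the type (KCOV_CMP_SIZE-encoded operand width, with
 * KCOV_CMP_CONST set when one operand is a constant), the two operands, and
 * the PC of the comparison. buf[0] holds the number of records.
 */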
static void __nomsan
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, intptr_t pc)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_CMP) {
		/* CMP tracing mode not enabled. */
		return;
	}

	idx = kd->buf[0];
	if ((idx * 4 + 4) <= kd->bufnent) {
		kd->buf[idx * 4 + 1] = type;
		kd->buf[idx * 4 + 2] = arg1;
		kd->buf[idx * 4 + 3] = arg2;
		kd->buf[idx * 4 + 4] = pc;
		kd->buf[0] = idx + 1;
	}
}

void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
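
/*
 * Hook for switch statements. The 'cases' array is laid out by the compiler
 * as: cases[0] = number of case values, cases[1] = width of the switched
 * value in bits, cases[2..] = the case values themselves. Each case value
 * is recorded as a constant comparison against 'val'.
 */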
void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases);

void __nomsan
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	intptr_t pc;

	pc = (intptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}

/* -------------------------------------------------------------------------- */

MODULE(MODULE_CLASS_MISC, kcov, NULL);
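
/*
 * Module glue: initialization is a no-op, and unloading is refused
 * (MODULE_CMD_FINI returns EINVAL), since instrumented code throughout the
 * kernel may still call the hooks above.
 */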
static int
kcov_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}