/*	$NetBSD: subr_kcov.c,v 1.4 2019/03/10 12:54:39 kamil Exp $	*/

/*
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Siddharth Muralee.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <sys/kcov.h>

#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

static dev_type_open(kcov_open);

const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};

/*
 * The KCOV descriptors (KDs) are allocated during open() and are associated
 * with a file descriptor.
 *
 * An LWP can 'enable' a KD. When this happens, the LWP becomes the owner of
 * the KD, and no LWP other than the owner can 'disable' it.
 *
 * A KD is freed when its file descriptor is closed _iff_ the KD is not
 * active on an LWP. If it is, we ask the LWP to free it when it exits.
 *
 * The mmapped buffers are backed by a dedicated uobj, so there is no risk
 * of the kernel freeing a buffer that is still mmapped in a process: the
 * uobj reference count remains non-zero, and the backing is not freed until
 * the process munmaps it.
 */
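
/*
 * Example (illustrative only, not part of the kernel): a minimal userland
 * sketch of the intended flow, assuming the device node is /dev/kcov and
 * that KCOV_IOC_*, KCOV_ENTRY_SIZE and kcov_int_t are exposed through
 * <sys/kcov.h>.  Error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/kcov.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *
 *	uint64_t nent = 1 << 16;
 *	int fd = open("/dev/kcov", O_RDWR);
 *	ioctl(fd, KCOV_IOC_SETBUFSIZE, &nent);
 *	kcov_int_t *cover = mmap(NULL, nent * KCOV_ENTRY_SIZE,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_IOC_ENABLE, NULL);
 *
 * From that point on, every instrumented kernel function executed on behalf
 * of this LWP records the PC of its call site into the mapped buffer (see
 * __sanitizer_cov_trace_pc() below); the companion sketch after that
 * function shows how the trace is read back and disabled.
 */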

typedef struct kcov_desc {
	kmutex_t lock;
	kcov_int_t *buf;
	struct uvm_object *uobj;
	size_t bufnent;
	size_t bufsize;
	bool enabled;
	bool lwpfree;
} kcov_t;

static specificdata_key_t kcov_lwp_key;

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}

static void
kcov_lwp_free(void *arg)
{
	kcov_t *kd = (kcov_t *)arg;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	if (kd->lwpfree) {
		kcov_free(kd);
	}
}

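/*
 * Trace buffer layout: entry zero holds the number of PCs recorded so far,
 * entries 1..bufnent hold the PCs themselves (see
 * __sanitizer_cov_trace_pc() below).  One entry is reserved for that index,
 * hence bufnent = nent - 1.  The size is rounded up to a multiple of
 * PAGE_SIZE so that whole pages can be mmapped by the owner.
 */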
static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;
	int error;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return EINVAL;
	if (kd->buf != NULL)
		return EEXIST;

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->bufnent = nent - 1;
	kd->bufsize = size;
	kd->uobj = uao_create(kd->bufsize, 0);

	/* Map the uobj into the kernel address space, as wired. */
	kd->buf = NULL;
	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(kd->uobj);
		return error;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
	    (vaddr_t)kd->buf + size, false, 0);
	if (error) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
		return error;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
}

static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->enabled) {
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}

static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	int error = 0;
	kcov_t *kd;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		if (kd->enabled) {
			error = EBUSY;
			break;
		}
		error = kcov_allocbuf(kd, *((uint64_t *)addr));
		break;
	case KCOV_IOC_ENABLE:
		if (kd->enabled) {
			error = EBUSY;
			break;
		}
		if (lwp_getspecific(kcov_lwp_key) != NULL) {
			error = EBUSY;
			break;
		}
		if (kd->buf == NULL) {
			error = ENOBUFS;
			break;
		}
		lwp_setspecific(kcov_lwp_key, kd);
		kd->enabled = true;
		break;
	case KCOV_IOC_DISABLE:
		if (!kd->enabled) {
			error = ENOENT;
			break;
		}
		if (lwp_getspecific(kcov_lwp_key) != kd) {
			error = ENOENT;
			break;
		}
		lwp_setspecific(kcov_lwp_key, NULL);
		kd->enabled = false;
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}

static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd;
	int error = 0;

	if (prot & PROT_EXEC)
		return EACCES;
	if (off < 0)
		return EINVAL;
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	if ((size + off) > kd->bufsize) {
		error = ENOMEM;
		goto out;
	}

	uao_reference(kd->uobj);

	*uobjp = kd->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}

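/*
 * Coverage is recorded only from thread (LWP) context.  ci_idepth is -1
 * while a CPU runs at base level and becomes non-negative while it services
 * an interrupt, which is what this helper tests.
 */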
static inline bool
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}

void __sanitizer_cov_trace_pc(void);

void
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = lwp_getspecific(kcov_lwp_key);
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled. */
		return;
	}

	idx = KCOV_LOAD(kd->buf[0]);
	if (idx < kd->bufnent) {
		KCOV_STORE(kd->buf[idx+1],
		    (intptr_t)__builtin_return_address(0));
		KCOV_STORE(kd->buf[0], idx + 1);
	}
}
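
/*
 * Example (illustrative only, continuing the userland sketch above): once
 * the code of interest has run, the consumer reads entry zero for the
 * number of PCs recorded by __sanitizer_cov_trace_pc(), walks entries
 * 1..count, and then disables tracing.  Error handling is omitted.
 *
 *	uint64_t n = cover[0];
 *	for (uint64_t i = 0; i < n; i++)
 *		printf("pc 0x%llx\n", (unsigned long long)cover[i + 1]);
 *	ioctl(fd, KCOV_IOC_DISABLE, NULL);
 *	cover[0] = 0;
 *
 * Resetting cover[0] lets the same buffer be reused for another run without
 * remapping it.
 */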

/* -------------------------------------------------------------------------- */

MODULE(MODULE_CLASS_ANY, kcov, NULL);

static void
kcov_init(void)
{

	lwp_specific_key_create(&kcov_lwp_key, kcov_lwp_free);
}

static int
kcov_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		kcov_init();
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}
    392