subr_kcov.c revision 1.15
/*	$NetBSD: subr_kcov.c,v 1.15 2020/05/16 17:42:06 hannken Exp $	*/

/*
 * Copyright (c) 2019-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Siddharth Muralee.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/module.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <sys/conf.h>
#include <sys/condvar.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>
#include <sys/kcov.h>

#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

#define KCOV_CMP_CONST		1
#define KCOV_CMP_SIZE(x)	((x) << 1)

static dev_type_open(kcov_open);

const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};

/*
 * The KCOV descriptors (KD) are allocated during open(), and are associated
 * with a file descriptor.
 *
 * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
 * the KD, and no LWP can 'disable' this KD except the owner.
 *
 * A KD is freed when its file descriptor is closed _iff_ the KD is not active
 * on an LWP. If it is, we ask the LWP to free it when it exits.
 *
 * The mmapped buffers are backed by a dedicated uobj, so there is no risk of
 * the kernel freeing a buffer still mmapped in a process: the uobj refcount
 * stays non-zero until that process unmaps the buffer, so the backing is not
 * freed before then.
 */

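/*
 * Illustrative userland usage of the local (non-remote) interface.  This is
 * a sketch only: error handling is omitted, the device path /dev/kcov is
 * assumed from kcov(4), and the ioctls, modes, KCOV_ENTRY_SIZE and
 * kcov_int_t come from <sys/kcov.h>.
 *
 *	int fd = open("/dev/kcov", O_RDWR);
 *	uint64_t nent = 1 << 16;
 *	ioctl(fd, KCOV_IOC_SETBUFSIZE, &nent);
 *	kcov_int_t *cover = mmap(NULL, nent * KCOV_ENTRY_SIZE,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	int mode = KCOV_MODE_TRACE_PC;
 *	ioctl(fd, KCOV_IOC_ENABLE, &mode);
 *	cover[0] = 0;
 *	... issue the system calls to be traced ...
 *	for (uint64_t i = 0; i < cover[0]; i++)
 *		printf("0x%jx\n", (uintmax_t)cover[i + 1]);
 *	ioctl(fd, KCOV_IOC_DISABLE, NULL);
 */
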
typedef struct kcov_desc {
	/* Local only */
	kmutex_t lock;
	bool lwpfree;
	bool silenced;

	/* Pointer to the remote descriptor, if any */
	struct kcov_desc *remote;

	/* Can be remote */
	kcov_int_t *buf;
	struct uvm_object *uobj;
	size_t bufnent;
	size_t bufsize;
	int mode;
	bool enabled;
} kcov_t;

/* -------------------------------------------------------------------------- */

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

static bool
kcov_mode_is_valid(int mode)
{
	switch (mode) {
	case KCOV_MODE_NONE:
	case KCOV_MODE_TRACE_PC:
	case KCOV_MODE_TRACE_CMP:
		return true;
	default:
		return false;
	}
}

/* -------------------------------------------------------------------------- */

static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}

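/*
 * Called from the LWP exit path: stop tracing for the exiting LWP and free
 * the descriptor if its file descriptor has already been closed (see
 * kcov_fops_close()).
 */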
void
kcov_lwp_free(struct lwp *l)
{
	kcov_t *kd = (kcov_t *)l->l_kcov;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	if (kd->lwpfree) {
		kcov_free(kd);
	}
}

static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;
	int error;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return EINVAL;
	if (kd->buf != NULL)
		return EEXIST;

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->bufnent = nent - 1;
	kd->bufsize = size;
	kd->uobj = uao_create(kd->bufsize, 0);

	/* Map the uobj into the kernel address space, as wired. */
	kd->buf = NULL;
	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(kd->uobj);
		return error;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
	    (vaddr_t)kd->buf + size, false, 0);
	if (error) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
		return error;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

typedef struct kcov_remote {
	LIST_ENTRY(kcov_remote) list;
	uint64_t subsystem;
	uint64_t id;
	u_int refcount;
	kcov_t kd;
} kcov_remote_t;

typedef LIST_HEAD(, kcov_remote) kcov_remote_list_t;

static kcov_remote_list_t kcov_remote_list;

static kcov_remote_t *
kcov_remote_find(uint64_t subsystem, uint64_t id)
{
	kcov_remote_t *kr;

	LIST_FOREACH(kr, &kcov_remote_list, list) {
		if (kr->subsystem == subsystem && kr->id == id)
			return kr;
	}

	return NULL;
}

void
kcov_remote_register(uint64_t subsystem, uint64_t id)
{
	kcov_remote_t *kr;
	kcov_t *kd;
	int error;

	if (kcov_remote_find(subsystem, id) != NULL) {
		panic("%s: kr already exists", __func__);
	}

	kr = kmem_zalloc(sizeof(*kr), KM_SLEEP);
	kr->subsystem = subsystem;
	kr->id = id;
	kr->refcount = 0;
	kd = &kr->kd;

	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
	error = kcov_allocbuf(kd, KCOV_BUF_MAX_ENTRIES);
	if (error != 0)
		panic("%s: failed to allocate buffer", __func__);

	LIST_INSERT_HEAD(&kcov_remote_list, kr, list);
}

void
kcov_remote_enter(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	kcov_t *kd;
	u_int refs __diagused;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	refs = atomic_inc_uint_nv(&kr->refcount);
	KASSERT(refs == 1);

	KASSERT(l->l_kcov == NULL);
	kd = &kr->kd;
	if (atomic_load_relaxed(&kd->enabled)) {
		l->l_kcov = kd;
	}
}

void
kcov_remote_leave(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	u_int refs __diagused;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	refs = atomic_dec_uint_nv(&kr->refcount);
	KASSERT(refs == 0);

	l->l_kcov = NULL;
}

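/*
 * Illustrative kernel-side usage of the remote interface (a sketch; the
 * subsystem and id values here are hypothetical):
 *
 *	kcov_remote_register(subsystem, id);	once, e.g. at attach time
 *	...
 *	kcov_remote_enter(subsystem, id);
 *	... the work to be traced, e.g. in a worker kthread ...
 *	kcov_remote_leave(subsystem, id);
 *
 * Userland binds its own kcov fd to the same (subsystem, id) pair with
 * KCOV_IOC_REMOTE_ATTACH, then enables tracing and mmaps the remote buffer
 * through that fd.
 */
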
static int
kcov_remote_enable(kcov_t *kd, int mode)
{
	kcov_lock(kd);
	if (kd->enabled) {
		kcov_unlock(kd);
		return EBUSY;
	}
	kd->mode = mode;
	atomic_store_relaxed(&kd->enabled, true);
	kcov_unlock(kd);

	return 0;
}

static int
kcov_remote_disable(kcov_t *kd)
{
	kcov_lock(kd);
	if (!kd->enabled) {
		kcov_unlock(kd);
		return ENOENT;
	}
	atomic_store_relaxed(&kd->enabled, false);
	kcov_unlock(kd);

	return 0;
}

static int
kcov_remote_attach(kcov_t *kd, struct kcov_ioc_remote_attach *args)
{
	kcov_remote_t *kr;

	if (kd->enabled)
		return EEXIST;

	kr = kcov_remote_find(args->subsystem, args->id);
	if (kr == NULL)
		return ENOENT;
	kd->remote = &kr->kd;

	return 0;
}

static int
kcov_remote_detach(kcov_t *kd)
{
	if (kd->enabled)
		return EEXIST;
	if (kd->remote == NULL)
		return ENOENT;
	(void)kcov_remote_disable(kd->remote);
	kd->remote = NULL;
	return 0;
}

/* -------------------------------------------------------------------------- */

static int
kcov_setbufsize(kcov_t *kd, uint64_t *args)
{
	if (kd->remote != NULL)
		return 0; /* buffer allocated remotely */
	if (kd->enabled)
		return EBUSY;
	return kcov_allocbuf(kd, *((uint64_t *)args));
}

static int
kcov_enable(kcov_t *kd, uint64_t *args)
{
	struct lwp *l = curlwp;
	int mode;

	mode = *((int *)args);
	if (!kcov_mode_is_valid(mode))
		return EINVAL;

	if (kd->remote != NULL)
		return kcov_remote_enable(kd->remote, mode);

	if (kd->enabled)
		return EBUSY;
	if (l->l_kcov != NULL)
		return EBUSY;
	if (kd->buf == NULL)
		return ENOBUFS;

	l->l_kcov = kd;
	kd->mode = mode;
	kd->enabled = true;
	return 0;
}

static int
kcov_disable(kcov_t *kd)
{
	struct lwp *l = curlwp;

	if (kd->remote != NULL)
		return kcov_remote_disable(kd->remote);

	if (!kd->enabled)
		return ENOENT;
	if (l->l_kcov != kd)
		return ENOENT;

	l->l_kcov = NULL;
	kd->enabled = false;
	return 0;
}

/* -------------------------------------------------------------------------- */

void
kcov_silence_enter(void)
{
	kcov_t *kd = curlwp->l_kcov;

	if (kd != NULL)
		kd->silenced = true;
}

void
kcov_silence_leave(void)
{
	kcov_t *kd = curlwp->l_kcov;

	if (kd != NULL)
		kd->silenced = false;
}

/* -------------------------------------------------------------------------- */

static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
}

static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->remote != NULL)
		(void)kcov_remote_disable(kd->remote);
	if (kd->enabled) {
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}

static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	kcov_t *kd;
	int error;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		error = kcov_setbufsize(kd, addr);
		break;
	case KCOV_IOC_ENABLE:
		error = kcov_enable(kd, addr);
		break;
	case KCOV_IOC_DISABLE:
		error = kcov_disable(kd);
		break;
	case KCOV_IOC_REMOTE_ATTACH:
		error = kcov_remote_attach(kd, addr);
		break;
	case KCOV_IOC_REMOTE_DETACH:
		error = kcov_remote_detach(kd);
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}

static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd, *kdbuf;
	int error = 0;

	if (prot & PROT_EXEC)
		return EACCES;
	if (off < 0)
		return EINVAL;
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	if (kd->remote != NULL)
		kdbuf = kd->remote;
	else
		kdbuf = kd;

	if ((size + off) > kdbuf->bufsize) {
		error = ENOMEM;
		goto out;
	}

	uao_reference(kdbuf->uobj);

	*uobjp = kdbuf->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}

/* -------------------------------------------------------------------------- */

/*
 * Constraints on the functions here: they must be marked with __nomsan, and
 * must not make any external call.
 */

static inline bool __nomsan
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}

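/*
 * Compiler-generated hook: when the kernel is built with coverage
 * instrumentation, the compiler emits a call to this function at each
 * coverage point.  It records the caller's return address in the buffer,
 * with buf[0] holding the number of entries recorded so far.
 */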
void __sanitizer_cov_trace_pc(void);

void __nomsan
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (__predict_false(kd->silenced)) {
		/* Silenced. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_PC) {
		/* PC tracing mode not enabled */
		return;
	}
	KASSERT(kd->remote == NULL);

	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] =
		    (intptr_t)__builtin_return_address(0);
		kd->buf[0] = idx + 1;
	}
}

static void __nomsan
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, intptr_t pc)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_CMP) {
		/* CMP tracing mode not enabled */
		return;
	}

	idx = kd->buf[0];
	if ((idx * 4 + 4) <= kd->bufnent) {
		kd->buf[idx * 4 + 1] = type;
		kd->buf[idx * 4 + 2] = arg1;
		kd->buf[idx * 4 + 3] = arg2;
		kd->buf[idx * 4 + 4] = pc;
		kd->buf[0] = idx + 1;
	}
}

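/*
 * In KCOV_MODE_TRACE_CMP, buf[0] holds the number of records and each record
 * occupies four consecutive entries: the type (KCOV_CMP_SIZE(n), i.e. the
 * operand width as 1 << n bytes, optionally ORed with KCOV_CMP_CONST), the
 * two operands, and the PC of the comparison.  A userland consumer could
 * decode the buffer along these lines (a sketch):
 *
 *	for (uint64_t i = 0; i < cover[0]; i++) {
 *		uint64_t type = cover[i * 4 + 1];
 *		uint64_t arg1 = cover[i * 4 + 2];
 *		uint64_t arg2 = cover[i * 4 + 3];
 *		uint64_t pc   = cover[i * 4 + 4];
 *		bool is_const = (type & KCOV_CMP_CONST) != 0;
 *		size_t width  = (size_t)1 << (type >> 1);
 *	}
 */
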
void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);

void __nomsan
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2);

void __nomsan
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases);

void __nomsan
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	intptr_t pc;

	pc = (intptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}

/* -------------------------------------------------------------------------- */

MODULE(MODULE_CLASS_MISC, kcov, NULL);

static int
kcov_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		return 0;
	case MODULE_CMD_FINI:
		return EINVAL;
	default:
		return ENOTTY;
	}
}
    798