/* subr_kcov.c, revision 1.13 — NetBSD sys/kern */
      1 /*	$NetBSD: subr_kcov.c,v 1.13 2020/05/15 12:34:52 maxv Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2019-2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Siddharth Muralee.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 
     34 #include <sys/module.h>
     35 #include <sys/param.h>
     36 #include <sys/systm.h>
     37 #include <sys/kernel.h>
     38 
     39 #include <sys/conf.h>
     40 #include <sys/condvar.h>
     41 #include <sys/file.h>
     42 #include <sys/filedesc.h>
     43 #include <sys/kmem.h>
     44 #include <sys/mman.h>
     45 #include <sys/mutex.h>
     46 #include <sys/queue.h>
     47 
     48 #include <uvm/uvm_extern.h>
     49 #include <sys/kcov.h>
     50 
     51 #define KCOV_BUF_MAX_ENTRIES	(256 << 10)
     52 
     53 #define KCOV_CMP_CONST		1
     54 #define KCOV_CMP_SIZE(x)	((x) << 1)
     55 
static dev_type_open(kcov_open);

/*
 * Character device switch for /dev/kcov. Only open() is implemented:
 * open clones a per-descriptor file (serviced by kcov_fileops below),
 * so every other device entry point is a no-op stub.
 */
const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
     72 
static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

/*
 * File operations for a cloned kcov descriptor: ioctl (configuration),
 * close (teardown) and mmap (exporting the trace buffer to userland).
 * Reads, writes and stat are rejected; poll/kqfilter are no-ops.
 */
const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};
     90 
     91 /*
     92  * The KCOV descriptors (KD) are allocated during open(), and are associated
     93  * with a file descriptor.
     94  *
     95  * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
     96  * the KD, and no LWP can 'disable' this KD except the owner.
     97  *
     98  * A KD is freed when its file descriptor is closed _iff_ the KD is not active
     99  * on an LWP. If it is, we ask the LWP to free it when it exits.
    100  *
    101  * The buffers mmapped are in a dedicated uobj, therefore there is no risk
    102  * that the kernel frees a buffer still mmapped in a process: the uobj
    103  * refcount will be non-zero, so the backing is not freed until an munmap
    104  * occurs on said process.
    105  */
    106 
typedef struct kcov_desc {
	/* Local only */
	kmutex_t lock;		/* serializes ioctl/close against each other */
	bool lwpfree;		/* free at LWP exit rather than at close() */

	/* Pointer to the remote KD this descriptor is attached to, if any */
	struct kcov_desc *remote;

	/* Can be remote: when attached, these live in the remote's KD */
	kcov_int_t *buf;	/* kernel mapping of the trace buffer */
	struct uvm_object *uobj;	/* backing object, also mmapped by userland */
	size_t bufnent;		/* usable entries: nent-1, buf[0] is the index */
	size_t bufsize;		/* size of the mapping, page-rounded */
	int mode;		/* selected KCOV_MODE_* */
	bool enabled;		/* tracing currently active */
} kcov_t;
    123 
    124 /* -------------------------------------------------------------------------- */
    125 
/*
 * Acquire the per-descriptor lock.
 */
static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}
    132 
/*
 * Release the per-descriptor lock.
 */
static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}
    139 
    140 static bool
    141 kcov_mode_is_valid(int mode)
    142 {
    143 	switch (mode) {
    144 	case KCOV_MODE_NONE:
    145 	case KCOV_MODE_TRACE_PC:
    146 	case KCOV_MODE_TRACE_CMP:
    147 		return true;
    148 	default:
    149 		return false;
    150 	}
    151 }
    152 
    153 /* -------------------------------------------------------------------------- */
    154 
/*
 * Free a KD: unmap its buffer from the kernel map (dropping the uobj
 * reference the mapping holds), then destroy the lock and the structure.
 * A userland mmap, if any, holds its own uobj reference, so the pages
 * survive until that process munmaps them (see block comment above).
 */
static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}
    166 
/*
 * Called when an LWP exits: stop tracing into the LWP's KD, and free
 * the KD if close() already ran and deferred the free to us (lwpfree).
 */
void
kcov_lwp_free(struct lwp *l)
{
	kcov_t *kd = (kcov_t *)l->l_kcov;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	if (kd->lwpfree) {
		/* close() already happened; the free is our responsibility */
		kcov_free(kd);
	}
}
    182 
    183 static int
    184 kcov_allocbuf(kcov_t *kd, uint64_t nent)
    185 {
    186 	size_t size;
    187 	int error;
    188 
    189 	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
    190 		return EINVAL;
    191 	if (kd->buf != NULL)
    192 		return EEXIST;
    193 
    194 	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
    195 	kd->bufnent = nent - 1;
    196 	kd->bufsize = size;
    197 	kd->uobj = uao_create(kd->bufsize, 0);
    198 
    199 	/* Map the uobj into the kernel address space, as wired. */
    200 	kd->buf = NULL;
    201 	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
    202 	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
    203 	    UVM_ADV_RANDOM, 0));
    204 	if (error) {
    205 		uao_detach(kd->uobj);
    206 		return error;
    207 	}
    208 	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
    209 	    (vaddr_t)kd->buf + size, false, 0);
    210 	if (error) {
    211 		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
    212 		return error;
    213 	}
    214 
    215 	return 0;
    216 }
    217 
    218 /* -------------------------------------------------------------------------- */
    219 
/*
 * A remote endpoint: a kernel subsystem registers a (subsystem, id)
 * pair, and a kcov descriptor can later attach to it to collect the
 * coverage of work executed inside that subsystem, rather than the
 * coverage of the descriptor owner's own LWP.
 */
typedef struct kcov_remote {
	LIST_ENTRY(kcov_remote) list;	/* entry on kcov_remote_list */
	uint64_t subsystem;		/* subsystem identifier */
	uint64_t id;			/* instance within the subsystem */
	u_int refcount;			/* 1 while an LWP is inside enter/leave */
	kcov_t kd;			/* embedded KD, buffer pre-allocated */
} kcov_remote_t;

typedef LIST_HEAD(, kcov_remote) kcov_remote_list_t;

/* Global list of registered remotes; entries are never removed. */
static kcov_remote_list_t kcov_remote_list;
    231 
    232 static kcov_remote_t *
    233 kcov_remote_find(uint64_t subsystem, uint64_t id)
    234 {
    235 	kcov_remote_t *kr;
    236 
    237 	LIST_FOREACH(kr, &kcov_remote_list, list) {
    238 		if (kr->subsystem == subsystem && kr->id == id)
    239 			return kr;
    240 	}
    241 
    242 	return NULL;
    243 }
    244 
/*
 * Register a remote endpoint. The embedded KD gets a maximum-size buffer
 * allocated up front. Duplicate registration and allocation failure are
 * fatal: callers register once, and failure leaves no way to recover.
 */
void
kcov_remote_register(uint64_t subsystem, uint64_t id)
{
	kcov_remote_t *kr;
	kcov_t *kd;
	int error;

	if (kcov_remote_find(subsystem, id) != NULL) {
		panic("%s: kr already exists", __func__);
	}

	kr = kmem_zalloc(sizeof(*kr), KM_SLEEP);
	kr->subsystem = subsystem;
	kr->id = id;
	kr->refcount = 0;
	kd = &kr->kd;

	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
	error = kcov_allocbuf(kd, KCOV_BUF_MAX_ENTRIES);
	if (error != 0)
		panic("%s: failed to allocate buffer", __func__);

	LIST_INSERT_HEAD(&kcov_remote_list, kr, list);
}
    269 
/*
 * Enter a remote section: called by a registered subsystem when it
 * starts a unit of work. If a descriptor attached to this remote has
 * tracing enabled, coverage of the section is recorded into the remote
 * KD on behalf of curlwp, until kcov_remote_leave().
 */
void
kcov_remote_enter(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	kcov_t *kd;
	u_int refs;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	/* At most one LWP may be inside a given remote at any time. */
	refs = atomic_inc_uint_nv(&kr->refcount);
	KASSERT(refs == 1);

	KASSERT(l->l_kcov == NULL);
	kd = &kr->kd;
	if (atomic_load_relaxed(&kd->enabled)) {
		l->l_kcov = kd;
	}
}
    292 
/*
 * Leave a remote section entered with kcov_remote_enter(): detach curlwp
 * from the remote KD and drop the refcount back to zero.
 */
void
kcov_remote_leave(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	u_int refs;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	refs = atomic_dec_uint_nv(&kr->refcount);
	KASSERT(refs == 0);

	l->l_kcov = NULL;
}
    310 
/*
 * Enable tracing on a remote KD. The atomic store pairs with the relaxed
 * load in kcov_remote_enter(). Returns EBUSY if already enabled.
 */
static int
kcov_remote_enable(kcov_t *kd, int mode)
{
	kcov_lock(kd);
	if (kd->enabled) {
		kcov_unlock(kd);
		return EBUSY;
	}
	kd->mode = mode;
	atomic_store_relaxed(&kd->enabled, true);
	kcov_unlock(kd);

	return 0;
}
    325 
/*
 * Disable tracing on a remote KD. Returns ENOENT if it was not enabled.
 */
static int
kcov_remote_disable(kcov_t *kd)
{
	kcov_lock(kd);
	if (!kd->enabled) {
		kcov_unlock(kd);
		return ENOENT;
	}
	atomic_store_relaxed(&kd->enabled, false);
	kcov_unlock(kd);

	return 0;
}
    339 
/*
 * KCOV_IOC_REMOTE_ATTACH handler: bind this descriptor to a registered
 * remote. From then on enable/disable/mmap act on the remote's embedded
 * KD. Called with the KD locked.
 */
static int
kcov_remote_attach(kcov_t *kd, struct kcov_ioc_remote_attach *args)
{
	kcov_remote_t *kr;

	if (kd->enabled)
		return EEXIST;

	kr = kcov_remote_find(args->subsystem, args->id);
	if (kr == NULL)
		return ENOENT;
	kd->remote = &kr->kd;

	return 0;
}
    355 
/*
 * KCOV_IOC_REMOTE_DETACH handler: undo a previous attach, disabling the
 * remote KD as a side effect. Called with the KD locked.
 */
static int
kcov_remote_detach(kcov_t *kd)
{
	if (kd->enabled)
		return EEXIST;
	if (kd->remote == NULL)
		return ENOENT;
	(void)kcov_remote_disable(kd->remote);
	kd->remote = NULL;
	return 0;
}
    367 
    368 /* -------------------------------------------------------------------------- */
    369 
    370 static int
    371 kcov_setbufsize(kcov_t *kd, uint64_t *args)
    372 {
    373 	if (kd->remote != NULL)
    374 		return 0; /* buffer allocated remotely */
    375 	if (kd->enabled)
    376 		return EBUSY;
    377 	return kcov_allocbuf(kd, *((uint64_t *)args));
    378 }
    379 
/*
 * KCOV_IOC_ENABLE handler: start tracing the current LWP into this KD's
 * buffer, or enable the attached remote KD. The calling LWP becomes the
 * owner; only it can disable the KD afterwards. Called with the KD
 * locked.
 */
static int
kcov_enable(kcov_t *kd, uint64_t *args)
{
	struct lwp *l = curlwp;
	int mode;

	/*
	 * NOTE(review): only sizeof(int) bytes of the uint64_t-sized ioctl
	 * argument are read here — verify this matches the KCOV_IOC_ENABLE
	 * definition on big-endian ports.
	 */
	mode = *((int *)args);
	if (!kcov_mode_is_valid(mode))
		return EINVAL;

	if (kd->remote != NULL)
		return kcov_remote_enable(kd->remote, mode);

	if (kd->enabled)
		return EBUSY;
	if (l->l_kcov != NULL)
		return EBUSY;
	if (kd->buf == NULL)
		return ENOBUFS;

	l->l_kcov = kd;
	kd->mode = mode;
	kd->enabled = true;
	return 0;
}
    405 
/*
 * KCOV_IOC_DISABLE handler: stop tracing. Only the owner LWP may disable
 * a locally-enabled KD (hence the l->l_kcov != kd check). Called with
 * the KD locked.
 */
static int
kcov_disable(kcov_t *kd)
{
	struct lwp *l = curlwp;

	if (kd->remote != NULL)
		return kcov_remote_disable(kd->remote);

	if (!kd->enabled)
		return ENOENT;
	if (l->l_kcov != kd)
		return ENOENT;

	l->l_kcov = NULL;
	kd->enabled = false;
	return 0;
}
    423 
    424 /* -------------------------------------------------------------------------- */
    425 
/*
 * open(2) on /dev/kcov: allocate a fresh, zeroed KD and clone it onto a
 * new file descriptor serviced by kcov_fileops. The device itself keeps
 * no state; everything is per-descriptor.
 */
static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return fd_clone(fp, fd, flag, &kcov_fileops, kd);
}
    442 
/*
 * close(2) on a kcov descriptor. If tracing is still enabled on an LWP
 * the KD cannot be freed now: mark it lwpfree and let kcov_lwp_free()
 * reap it when that LWP exits. Otherwise free it immediately.
 */
static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->remote != NULL)
		(void)kcov_remote_disable(kd->remote);
	if (kd->enabled) {
		/* Defer the free to the owner LWP's exit path. */
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}
    462 
/*
 * ioctl(2) dispatcher for a kcov descriptor. Every command runs with the
 * KD locked, which serializes configuration against close() and against
 * concurrent ioctls on the same descriptor.
 */
static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	kcov_t *kd;
	int error;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		error = kcov_setbufsize(kd, addr);
		break;
	case KCOV_IOC_ENABLE:
		error = kcov_enable(kd, addr);
		break;
	case KCOV_IOC_DISABLE:
		error = kcov_disable(kd);
		break;
	case KCOV_IOC_REMOTE_ATTACH:
		error = kcov_remote_attach(kd, addr);
		break;
	case KCOV_IOC_REMOTE_DETACH:
		error = kcov_remote_detach(kd);
		break;
	default:
		error = EINVAL;
	}

	kcov_unlock(kd);
	return error;
}
    497 
/*
 * mmap(2) on a kcov descriptor: hand out the uobj backing the trace
 * buffer (local, or the attached remote's) so userland maps the same
 * pages the kernel writes into. An extra uobj reference is taken for
 * the caller's mapping; it keeps the pages alive past kcov_free().
 */
static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd, *kdbuf;
	int error = 0;

	if (prot & PROT_EXEC)
		return EACCES;
	if (off < 0)
		return EINVAL;
	/* Bound size and offset before summing, so the sum cannot wrap. */
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return EINVAL;

	kd = fp->f_data;
	if (kd == NULL)
		return ENXIO;
	kcov_lock(kd);

	if (kd->remote != NULL)
		kdbuf = kd->remote;
	else
		kdbuf = kd;

	if ((size + off) > kdbuf->bufsize) {
		error = ENOMEM;
		goto out;
	}

	uao_reference(kdbuf->uobj);

	*uobjp = kdbuf->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}
    540 
    541 /* -------------------------------------------------------------------------- */
    542 
    543 /*
    544  * Constraints on the functions here: they must be marked with __nomsan, and
    545  * must not make any external call.
    546  */
    547 
/*
 * True if the current CPU is servicing an interrupt.
 * NOTE(review): assumes ci_idepth is negative (-1) at base level on the
 * ports that support KCOV — confirm when porting.
 */
static inline bool __nomsan
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}
    553 
void __sanitizer_cov_trace_pc(void);

/*
 * Compiler-inserted hook (-fsanitize-coverage=trace-pc), called at every
 * basic-block edge. If the current LWP is traced in PC mode, record the
 * caller's return address into the buffer: buf[0] holds the number of
 * recorded entries, entry i lives in buf[i+1].
 *
 * Constraints (see block comment above): __nomsan, no external calls.
 */
void __nomsan
__sanitizer_cov_trace_pc(void)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_PC) {
		/* PC tracing mode not enabled */
		return;
	}
	/* l_kcov always points at the KD owning the buffer, never at an
	 * attaching descriptor's KD. */
	KASSERT(kd->remote == NULL);

	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] =
		    (intptr_t)__builtin_return_address(0);
		kd->buf[0] = idx + 1;
	}
}
    597 
/*
 * Common helper for the trace_cmp* hooks: if the current LWP is traced
 * in CMP mode, append a 4-word record (type, arg1, arg2, pc) to its
 * buffer. buf[0] holds the number of records already stored; record i
 * occupies buf[4i+1 .. 4i+4].
 *
 * Constraints (see block comment above): __nomsan, no external calls.
 */
static void __nomsan
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, intptr_t pc)
{
	extern int cold;
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_CMP) {
		/* CMP tracing mode not enabled */
		return;
	}

	idx = kd->buf[0];
	if ((idx * 4 + 4) <= kd->bufnent) {
		kd->buf[idx * 4 + 1] = type;
		kd->buf[idx * 4 + 2] = arg1;
		kd->buf[idx * 4 + 3] = arg2;
		kd->buf[idx * 4 + 4] = pc;
		kd->buf[0] = idx + 1;
	}
}
    640 
void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);

/* Compiler hook: 8-bit comparison, non-constant operands. */
void __nomsan
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    650 
void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);

/* Compiler hook: 16-bit comparison, non-constant operands. */
void __nomsan
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    660 
void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);

/* Compiler hook: 32-bit comparison, non-constant operands. */
void __nomsan
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    670 
void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);

/* Compiler hook: 64-bit comparison, non-constant operands. */
void __nomsan
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    680 
void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2);

/* Compiler hook: 8-bit comparison, one constant operand. */
void __nomsan
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    690 
void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2);

/* Compiler hook: 16-bit comparison, one constant operand. */
void __nomsan
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    700 
void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2);

/* Compiler hook: 32-bit comparison, one constant operand. */
void __nomsan
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    710 
void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2);

/* Compiler hook: 64-bit comparison, one constant operand. */
void __nomsan
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}
    720 
void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases);

/*
 * Compiler hook for switch statements. 'cases' is laid out by the
 * compiler as { ncases, bitwidth, case0, case1, ... }; each case value
 * is reported as a constant comparison against 'val'. Unknown bit
 * widths are silently ignored.
 */
void __nomsan
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	uint64_t i, nbits, ncases, type;
	intptr_t pc;

	pc = (intptr_t)__builtin_return_address(0);
	ncases = cases[0];
	nbits = cases[1];

	switch (nbits) {
	case 8:
		type = KCOV_CMP_SIZE(0);
		break;
	case 16:
		type = KCOV_CMP_SIZE(1);
		break;
	case 32:
		type = KCOV_CMP_SIZE(2);
		break;
	case 64:
		type = KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	type |= KCOV_CMP_CONST;

	for (i = 0; i < ncases; i++)
		trace_cmp(type, cases[i + 2], val, pc);
}
    754 
    755 /* -------------------------------------------------------------------------- */
    756 
    757 MODULE(MODULE_CLASS_MISC, kcov, NULL);
    758 
    759 static int
    760 kcov_modcmd(modcmd_t cmd, void *arg)
    761 {
    762 
    763    	switch (cmd) {
    764 	case MODULE_CMD_INIT:
    765 		return 0;
    766 	case MODULE_CMD_FINI:
    767 		return EINVAL;
    768 	default:
    769 		return ENOTTY;
    770 	}
    771 }
    772