1 /* $NetBSD: subr_kcov.c,v 1.20 2026/01/04 03:19:01 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 2019-2020 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Siddharth Muralee. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/types.h>

#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kcov.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

/* Upper bound on the number of kcov_int_t entries in a coverage buffer. */
#define KCOV_BUF_MAX_ENTRIES	(256 << 10)

/*
 * Encoding of the "type" word of a KCOV_MODE_TRACE_CMP record: bit 0 is
 * set when one operand is a compile-time constant, bits 1-2 hold
 * log2(operand size in bytes).
 */
#define KCOV_CMP_CONST		1
#define KCOV_CMP_SIZE(x)	((x) << 1)

static dev_type_open(kcov_open);

/*
 * Character device switch: only open() is meaningful.  open() clones a
 * file descriptor (see kcov_open), and every further operation goes
 * through kcov_fileops below.
 */
const struct cdevsw kcov_cdevsw = {
	.d_open = kcov_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

static int kcov_fops_ioctl(file_t *, u_long, void *);
static int kcov_fops_close(file_t *);
static int kcov_fops_mmap(file_t *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);

/* Per-descriptor file operations: ioctl (control), close, and mmap. */
const struct fileops kcov_fileops = {
	.fo_read = fbadop_read,
	.fo_write = fbadop_write,
	.fo_ioctl = kcov_fops_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = fnullop_poll,
	.fo_stat = fbadop_stat,
	.fo_close = kcov_fops_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = kcov_fops_mmap,
};

/*
 * The KCOV descriptors (KD) are allocated during open(), and are associated
 * with a file descriptor.
 *
 * An LWP can 'enable' a KD. When this happens, this LWP becomes the owner of
 * the KD, and no LWP can 'disable' this KD except the owner.
 *
 * A KD is freed when its file descriptor is closed _iff_ the KD is not active
 * on an LWP. If it is, we ask the LWP to free it when it exits.
 *
 * The buffers mmapped are in a dedicated uobj, therefore there is no risk
 * that the kernel frees a buffer still mmapped in a process: the uobj
 * refcount will be non-zero, so the backing is not freed until an munmap
 * occurs on said process.
 */

typedef struct kcov_desc {
	/* Local only */
	kmutex_t lock;		/* serializes ioctl/close on this KD */
	bool lwpfree;		/* close() ran while enabled; owner LWP frees */
	bool silenced;		/* tracing muted, see kcov_silence_enter() */

	/*
	 * Remote KD this descriptor proxies (set by kcov_remote_attach),
	 * or NULL for a plain per-LWP descriptor.
	 */
	struct kcov_desc *remote;

	/* Can be remote */
	kcov_int_t *buf;	/* coverage buffer; buf[0] holds the count */
	struct uvm_object *uobj;	/* anonymous object backing buf */
	size_t bufnent;		/* usable entries, excluding buf[0] */
	size_t bufsize;		/* mapping size in bytes, page-rounded */
	int mode;		/* KCOV_MODE_* selected at enable time */
	bool enabled;		/* tracing currently active */
} kcov_t;

/* -------------------------------------------------------------------------- */

static void
kcov_lock(kcov_t *kd)
{

	mutex_enter(&kd->lock);
}

static void
kcov_unlock(kcov_t *kd)
{

	mutex_exit(&kd->lock);
}

/* Return true if 'mode' is one of the KCOV_MODE_* values we support. */
static bool
kcov_mode_is_valid(int mode)
{
	switch (mode) {
	case KCOV_MODE_NONE:
	case KCOV_MODE_TRACE_PC:
	case KCOV_MODE_TRACE_CMP:
		return true;
	default:
		return false;
	}
}

/* -------------------------------------------------------------------------- */

/*
 * Release a KD: unmap the (wired) buffer from the kernel map, which drops
 * the kernel's reference on the uobj, then destroy the descriptor.  Any
 * userland mapping holds its own uobj reference (kcov_fops_mmap), so its
 * pages stay valid until it munmaps.
 */
static void
kcov_free(kcov_t *kd)
{

	KASSERT(kd != NULL);
	if (kd->buf != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, kd->bufsize);
	}
	mutex_destroy(&kd->lock);
	kmem_free(kd, sizeof(*kd));
}

/*
 * Called when an LWP exits: stop tracing on its KD and, if close() already
 * ran on the descriptor (lwpfree set there), free it on the LWP's behalf.
 */
void
kcov_lwp_free(struct lwp *l)
{
	kcov_t *kd = (kcov_t *)l->l_kcov;

	if (kd == NULL) {
		return;
	}
	kcov_lock(kd);
	kd->enabled = false;
	kcov_unlock(kd);
	/*
	 * NOTE(review): lwpfree is read after dropping the lock -- this
	 * relies on close() having set it before the owner exits; confirm
	 * the ordering against the close path.
	 */
	if (kd->lwpfree) {
		kcov_free(kd);
	}
}

/*
 * Allocate and wire the coverage buffer for 'nent' entries.  One entry is
 * reserved for the count word at buf[0], hence bufnent = nent - 1.  The
 * buffer lives in an anonymous uobj so it can also be mmapped into
 * userland (see kcov_fops_mmap).
 *
 * Returns 0, or EINVAL for an out-of-range size, EEXIST if a buffer was
 * already allocated, or a uvm_map(9) error.
 */
static int
kcov_allocbuf(kcov_t *kd, uint64_t nent)
{
	size_t size;
	int error;

	if (nent < 2 || nent > KCOV_BUF_MAX_ENTRIES)
		return SET_ERROR(EINVAL);
	if (kd->buf != NULL)
		return SET_ERROR(EEXIST);

	size = roundup(nent * KCOV_ENTRY_SIZE, PAGE_SIZE);
	kd->bufnent = nent - 1;
	kd->bufsize = size;
	kd->uobj = uao_create(kd->bufsize, 0);

	/* Map the uobj into the kernel address space, as wired. */
	kd->buf = NULL;
	error = uvm_map(kernel_map, (vaddr_t *)&kd->buf, kd->bufsize, kd->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(kd->uobj);
		return error;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)kd->buf,
	    (vaddr_t)kd->buf + size, false, 0);
	if (error) {
		/* Drops the uobj reference taken by the mapping. */
		uvm_deallocate(kernel_map, (vaddr_t)kd->buf, size);
		return error;
	}

	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * A remote is a preallocated, kernel-owned KD that a subsystem registers
 * once, and that user descriptors can attach to by (subsystem, id).
 */
typedef struct kcov_remote {
	LIST_ENTRY(kcov_remote) list;
	uint64_t subsystem;
	uint64_t id;
	u_int refcount;		/* LWPs currently inside the remote section */
	kcov_t kd;
} kcov_remote_t;

typedef LIST_HEAD(, kcov_remote) kcov_remote_list_t;

static kcov_remote_list_t kcov_remote_list;

/* Look up a registered remote by (subsystem, id); NULL if absent. */
static kcov_remote_t *
kcov_remote_find(uint64_t subsystem, uint64_t id)
{
	kcov_remote_t *kr;

	LIST_FOREACH(kr, &kcov_remote_list, list) {
		if (kr->subsystem == subsystem && kr->id == id)
			return kr;
	}

	return NULL;
}

/*
 * Register a remote coverage point.  Panics on a duplicate (subsystem, id)
 * or on buffer allocation failure; presumably called at initialization
 * time, where failure is not recoverable.
 */
void
kcov_remote_register(uint64_t subsystem, uint64_t id)
{
	kcov_remote_t *kr;
	kcov_t *kd;
	int error;

	if (kcov_remote_find(subsystem, id) != NULL) {
		panic("%s: kr already exists", __func__);
	}

	kr = kmem_zalloc(sizeof(*kr), KM_SLEEP);
	kr->subsystem = subsystem;
	kr->id = id;
	kr->refcount = 0;
	kd = &kr->kd;

	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);
	error = kcov_allocbuf(kd, KCOV_BUF_MAX_ENTRIES);
	if (error != 0)
		panic("%s: failed to allocate buffer", __func__);

	LIST_INSERT_HEAD(&kcov_remote_list, kr, list);
}

/*
 * Enter a remote section: if the remote KD is enabled, point curlwp's
 * l_kcov at it so the tracing hooks record into the remote buffer.  The
 * refcount KASSERT enforces that at most one LWP is inside a given remote
 * section at a time.
 */
void
kcov_remote_enter(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	kcov_t *kd;
	u_int refs __diagused;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	/* Only one LWP may be inside a remote section at a time. */
	refs = atomic_inc_uint_nv(&kr->refcount);
	KASSERT(refs == 1);

	KASSERT(l->l_kcov == NULL);
	kd = &kr->kd;
	if (atomic_load_relaxed(&kd->enabled)) {
		l->l_kcov = kd;
	}
}

/*
 * Leave a remote section: drop the refcount and detach curlwp from the
 * remote KD, ending collection into the remote buffer.
 */
void
kcov_remote_leave(uint64_t subsystem, uint64_t id)
{
	struct lwp *l = curlwp;
	kcov_remote_t *kr;
	u_int refs __diagused;

	kr = kcov_remote_find(subsystem, id);
	if (__predict_false(kr == NULL)) {
		panic("%s: unable to find kr", __func__);
	}

	refs = atomic_dec_uint_nv(&kr->refcount);
	KASSERT(refs == 0);

	l->l_kcov = NULL;
}

/*
 * Enable a remote KD on behalf of an attached user descriptor.  The
 * 'enabled' flag is stored atomically so kcov_remote_enter() can read it
 * without taking the lock.
 */
static int
kcov_remote_enable(kcov_t *kd, int mode)
{
	kcov_lock(kd);
	if (kd->enabled) {
		kcov_unlock(kd);
		return SET_ERROR(EBUSY);
	}
	kd->mode = mode;
	atomic_store_relaxed(&kd->enabled, true);
	kcov_unlock(kd);

	return 0;
}

/* Disable a remote KD; ENOENT if it was not enabled. */
static int
kcov_remote_disable(kcov_t *kd)
{
	kcov_lock(kd);
	if (!kd->enabled) {
		kcov_unlock(kd);
		return SET_ERROR(ENOENT);
	}
	atomic_store_relaxed(&kd->enabled, false);
	kcov_unlock(kd);

	return 0;
}

/*
 * Attach a user descriptor to a registered remote.  From then on, buffer
 * and enable/disable operations are forwarded to the remote KD.
 */
static int
kcov_remote_attach(kcov_t *kd, struct kcov_ioc_remote_attach *args)
{
	kcov_remote_t *kr;

	if (kd->enabled)
		return SET_ERROR(EEXIST);

	kr = kcov_remote_find(args->subsystem, args->id);
	if (kr == NULL)
		return SET_ERROR(ENOENT);
	kd->remote = &kr->kd;

	return 0;
}

/* Detach from the remote, disabling it first if it was enabled. */
static int
kcov_remote_detach(kcov_t *kd)
{
	if (kd->enabled)
		return SET_ERROR(EEXIST);
	if (kd->remote == NULL)
		return SET_ERROR(ENOENT);
	(void)kcov_remote_disable(kd->remote);
	kd->remote = NULL;
	return 0;
}

/* -------------------------------------------------------------------------- */

/*
 * KCOV_IOC_SETBUFSIZE: allocate the buffer for a plain descriptor.  A
 * remote's buffer is preallocated at registration time, so the request is
 * accepted as a no-op there.
 */
static int
kcov_setbufsize(kcov_t *kd, uint64_t *args)
{
	if (kd->remote != NULL)
		return 0; /* buffer allocated remotely */
	if (kd->enabled)
		return SET_ERROR(EBUSY);
	return kcov_allocbuf(kd, *((uint64_t *)args));
}

/*
 * KCOV_IOC_ENABLE: start tracing into this KD (or its remote) with the
 * requested mode.  For a plain KD, curlwp becomes the owner via l_kcov.
 */
static int
kcov_enable(kcov_t *kd, uint64_t *args)
{
	struct lwp *l = curlwp;
	int mode;

	/*
	 * NOTE(review): reads the first sizeof(int) bytes of the uint64_t
	 * argument -- on a big-endian machine this is the high word;
	 * confirm against the kcov(4) ioctl ABI.
	 */
	mode = *((int *)args);
	if (!kcov_mode_is_valid(mode))
		return SET_ERROR(EINVAL);

	if (kd->remote != NULL)
		return kcov_remote_enable(kd->remote, mode);

	if (kd->enabled)
		return SET_ERROR(EBUSY);
	if (l->l_kcov != NULL)
		return SET_ERROR(EBUSY);
	if (kd->buf == NULL)
		return SET_ERROR(ENOBUFS);

	l->l_kcov = kd;
	kd->mode = mode;
	kd->enabled = true;
	return 0;
}

/*
 * KCOV_IOC_DISABLE: stop tracing.  Only the owner LWP (the one that
 * enabled the KD) may disable a plain KD.
 */
static int
kcov_disable(kcov_t *kd)
{
	struct lwp *l = curlwp;

	if (kd->remote != NULL)
		return kcov_remote_disable(kd->remote);

	if (!kd->enabled)
		return SET_ERROR(ENOENT);
	if (l->l_kcov != kd)
		return SET_ERROR(ENOENT);

	l->l_kcov = NULL;
	kd->enabled = false;
	return 0;
}

/* -------------------------------------------------------------------------- */

/* Temporarily mute tracing for curlwp's KD. */
void
kcov_silence_enter(void)
{
	kcov_t *kd = curlwp->l_kcov;

	if (kd != NULL)
		kd->silenced = true;
}

/* Undo kcov_silence_enter(). */
void
kcov_silence_leave(void)
{
	kcov_t *kd = curlwp->l_kcov;

	if (kd != NULL)
		kd->silenced = false;
}

/* -------------------------------------------------------------------------- */

/*
 * open(2) on the kcov device: allocate a fresh KD and clone it onto a new
 * file descriptor serviced by kcov_fileops.
 */
static int
kcov_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct file *fp;
	int error, fd;
	kcov_t *kd;

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	kd = kmem_zalloc(sizeof(*kd), KM_SLEEP);
	mutex_init(&kd->lock, MUTEX_DEFAULT, IPL_NONE);

	return
	    fd_clone(fp, fd, flag, &kcov_fileops, kd);
}

/*
 * close(2): disable any attached remote, then free the KD -- unless it is
 * still enabled on an LWP, in which case the LWP frees it on exit (see
 * kcov_lwp_free).
 */
static int
kcov_fops_close(file_t *fp)
{
	kcov_t *kd = fp->f_data;

	kcov_lock(kd);
	if (kd->remote != NULL)
		(void)kcov_remote_disable(kd->remote);
	if (kd->enabled) {
		kd->lwpfree = true;
		kcov_unlock(kd);
	} else {
		kcov_unlock(kd);
		kcov_free(kd);
	}
	fp->f_data = NULL;

	return 0;
}

/* ioctl(2): dispatch the KCOV_IOC_* commands, serialized by kd->lock. */
static int
kcov_fops_ioctl(file_t *fp, u_long cmd, void *addr)
{
	kcov_t *kd;
	int error;

	kd = fp->f_data;
	if (kd == NULL)
		return SET_ERROR(ENXIO);
	kcov_lock(kd);

	switch (cmd) {
	case KCOV_IOC_SETBUFSIZE:
		error = kcov_setbufsize(kd, addr);
		break;
	case KCOV_IOC_ENABLE:
		error = kcov_enable(kd, addr);
		break;
	case KCOV_IOC_DISABLE:
		error = kcov_disable(kd);
		break;
	case KCOV_IOC_REMOTE_ATTACH:
		error = kcov_remote_attach(kd, addr);
		break;
	case KCOV_IOC_REMOTE_DETACH:
		error = kcov_remote_detach(kd);
		break;
	default:
		error = SET_ERROR(EINVAL);
	}

	kcov_unlock(kd);
	return error;
}

/*
 * mmap(2): hand the buffer's uobj to UVM so userland shares the pages with
 * the kernel.  PROT_EXEC is refused, and the request must fit inside the
 * buffer (of the remote KD, when attached).  A uobj reference is taken so
 * the backing outlives the KD if needed.
 */
static int
kcov_fops_mmap(file_t *fp, off_t *offp, size_t size, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	off_t off = *offp;
	kcov_t *kd, *kdbuf;
	int error = 0;

	KASSERT(size > 0);

	if (prot & PROT_EXEC)
		return SET_ERROR(EACCES);
	if (off < 0)
		return SET_ERROR(EINVAL);
	if (size > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return SET_ERROR(EINVAL);
	if (off > KCOV_BUF_MAX_ENTRIES * KCOV_ENTRY_SIZE)
		return SET_ERROR(EINVAL);

	kd = fp->f_data;
	if (kd == NULL)
		return SET_ERROR(ENXIO);
	kcov_lock(kd);

	/* Remote descriptors proxy the remote KD's buffer. */
	if (kd->remote != NULL)
		kdbuf = kd->remote;
	else
		kdbuf = kd;

	if ((size + off) > kdbuf->bufsize) {
		error = SET_ERROR(ENOMEM);
		goto out;
	}

	uao_reference(kdbuf->uobj);

	*uobjp = kdbuf->uobj;
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;

out:
	kcov_unlock(kd);
	return error;
}

/* -------------------------------------------------------------------------- */

/*
 * Constraints on the functions here: they must be marked with __nomsan, and
 * must not make any external call.
 */

/* True when executing in interrupt context (ci_idepth >= 0). */
static inline bool __nomsan
in_interrupt(void)
{
	return curcpu()->ci_idepth >= 0;
}

void __sanitizer_cov_trace_pc(void);

/*
 * Compiler-inserted edge hook.  Appends the caller's PC to the buffer:
 * buf[0] holds the number of recorded entries, PCs follow at buf[1..].
 */
void __nomsan
__sanitizer_cov_trace_pc(void)
{
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled. */
		return;
	}

	if (__predict_false(kd->silenced)) {
		/* Silenced. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_PC) {
		/* PC tracing mode not enabled. */
		return;
	}
	/* l_kcov always points at the buffer-owning KD, never a proxy. */
	KASSERT(kd->remote == NULL);

	idx = kd->buf[0];
	if (idx < kd->bufnent) {
		kd->buf[idx+1] =
		    (intptr_t)__builtin_return_address(0);
		kd->buf[0] = idx + 1;
	}
}

/*
 * Common helper for the trace_cmp* hooks.  Each record occupies four
 * entries after buf[0]: { type, arg1, arg2, pc }; buf[0] counts records.
 */
static void __nomsan
trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, intptr_t pc)
{
	uint64_t idx;
	kcov_t *kd;

	if (__predict_false(cold)) {
		/* Do not trace during boot. */
		return;
	}

	if (in_interrupt()) {
		/* Do not trace in interrupts. */
		return;
	}

	kd = curlwp->l_kcov;
	if (__predict_true(kd == NULL)) {
		/* Not traced. */
		return;
	}

	if (!kd->enabled) {
		/* Tracing not enabled. */
		return;
	}

	if (__predict_false(kd->silenced)) {
		/* Silenced. */
		return;
	}

	if (kd->mode != KCOV_MODE_TRACE_CMP) {
		/* CMP tracing mode not enabled. */
		return;
	}
	/* l_kcov always points at the buffer-owning KD, never a proxy. */
	KASSERT(kd->remote == NULL);

	idx = kd->buf[0];
	if ((idx * 4 + 4) <= kd->bufnent) {
		kd->buf[idx * 4 + 1] = type;
		kd->buf[idx * 4 + 2] = arg1;
		kd->buf[idx * 4 + 3] = arg2;
		kd->buf[idx * 4 + 4] = pc;
		kd->buf[0] = idx + 1;
	}
}

void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);

/* 8-bit comparison hook. */
void __nomsan
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);

/* 16-bit comparison hook. */
void __nomsan
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);

/* 32-bit comparison hook. */
void __nomsan
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);

/* 64-bit comparison hook. */
void __nomsan
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2);

/* 8-bit comparison against a compile-time constant. */
void __nomsan
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2);

/* 16-bit comparison against a compile-time constant. */
void __nomsan
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{

	trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
	    (intptr_t)__builtin_return_address(0));
}

void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2); 736 737 void __nomsan 738 __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2) 739 { 740 741 trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2, 742 (intptr_t)__builtin_return_address(0)); 743 } 744 745 void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2); 746 747 void __nomsan 748 __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2) 749 { 750 751 trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2, 752 (intptr_t)__builtin_return_address(0)); 753 } 754 755 void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases); 756 757 void __nomsan 758 __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) 759 { 760 uint64_t i, nbits, ncases, type; 761 intptr_t pc; 762 763 pc = (intptr_t)__builtin_return_address(0); 764 ncases = cases[0]; 765 nbits = cases[1]; 766 767 switch (nbits) { 768 case 8: 769 type = KCOV_CMP_SIZE(0); 770 break; 771 case 16: 772 type = KCOV_CMP_SIZE(1); 773 break; 774 case 32: 775 type = KCOV_CMP_SIZE(2); 776 break; 777 case 64: 778 type = KCOV_CMP_SIZE(3); 779 break; 780 default: 781 return; 782 } 783 type |= KCOV_CMP_CONST; 784 785 for (i = 0; i < ncases; i++) 786 trace_cmp(type, cases[i + 2], val, pc); 787 } 788 789 /* -------------------------------------------------------------------------- */ 790 791 MODULE(MODULE_CLASS_MISC, kcov, NULL); 792 793 static int 794 kcov_modcmd(modcmd_t cmd, void *arg) 795 { 796 797 switch (cmd) { 798 case MODULE_CMD_INIT: 799 return 0; 800 case MODULE_CMD_FINI: 801 return SET_ERROR(EINVAL); 802 default: 803 return SET_ERROR(ENOTTY); 804 } 805 } 806