/*	$NetBSD: kern_ktrace.c,v 1.188 2026/02/01 19:41:46 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.5 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.188 2026/02/01 19:41:46 christos Exp $");

#include <sys/param.h>

#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/ktrace.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/syncobj.h>
#include <sys/syscallargs.h>
#include <sys/syslog.h>
#include <sys/systm.h>

/*
 * TODO:
 * - need better error reporting?
 * - userland utility to sort ktrace.out by timestamp.
 * - keep minimal information in ktrace_entry when the rest of the
 *   allocation fails.
 * - per-trace control of configurable parameters.
 */

struct ktrace_entry {
	TAILQ_ENTRY(ktrace_entry) kte_list;
	struct ktr_header kte_kth;
	void *kte_buf;
	size_t kte_bufsz;
#define	KTE_SPACE		32
	uint8_t kte_space[KTE_SPACE] __aligned(sizeof(register_t));
};

struct ktr_desc {
	TAILQ_ENTRY(ktr_desc) ktd_list;
	int ktd_flags;
#define	KTDF_WAIT		0x0001
#define	KTDF_DONE		0x0002
#define	KTDF_BLOCKING		0x0004
#define	KTDF_INTERACTIVE	0x0008
	int ktd_error;
#define	KTDE_ENOMEM		0x0001
#define	KTDE_ENOSPC		0x0002
	int ktd_errcnt;
	int ktd_ref;			/* # of references */
	int ktd_qcount;			/* # of entries in the queue */

	/*
	 * Params to control behaviour.
	 */
	int ktd_delayqcnt;		/* # of entries allowed to delay */
	int ktd_wakedelay;		/* wakeup delay, in *ticks* */
	int ktd_intrwakdl;		/* ditto, but when interactive */

	file_t *ktd_fp;			/* trace output file */
	lwp_t *ktd_lwp;			/* our kernel thread */
	TAILQ_HEAD(, ktrace_entry) ktd_queue;
	callout_t ktd_wakch;		/* delayed wakeup */
	kcondvar_t ktd_sync_cv;
	kcondvar_t ktd_cv;
};

static void	ktrwrite(struct ktr_desc *, struct ktrace_entry *);
static int	ktrops(lwp_t *, struct proc *, int, int,
		    struct ktr_desc *);
static int	ktrsetchildren(lwp_t *, struct proc *, int, int,
		    struct ktr_desc *);
static int	ktrcanset(lwp_t *, struct proc *);
static int	ktrsamefile(file_t *, file_t *);
static void	ktr_kmem(lwp_t *, int, const void *, size_t);
static void	ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t);

static struct ktr_desc *
		ktd_lookup(file_t *);
static void	ktdrel(struct ktr_desc *);
static void	ktdref(struct ktr_desc *);
static void	ktefree(struct ktrace_entry *);
static void	ktd_logerrl(struct ktr_desc *, int);
static void	ktrace_thread(void *);
static int	ktrderefall(struct ktr_desc *, int);

/*
 * Default values.
 */
#define	KTD_MAXENTRY		1000	/* XXX: tune */
#define	KTD_TIMEOUT		5	/* XXX: tune */
#define	KTD_DELAYQCNT		100	/* XXX: tune */
#define	KTD_WAKEDELAY		5000	/* XXX: tune */
#define	KTD_INTRWAKDL		100	/* XXX: tune */
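
/*
 * Overview, as read from the code below: traced processes act as
 * producers, allocating a ktrace_entry per event and appending it to
 * the per-descriptor queue in ktraddentry(); a dedicated kernel
 * thread (ktrace_thread) is the consumer, draining the queue and
 * writing the records to the trace file.  Wakeups of the writer are
 * batched: they are deferred through a callout until ktd_delayqcnt
 * entries have accumulated or the delay expires, and producers
 * synchronize with the writer once the queue is half full.
 */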

/*
 * Patchable variables.
 */
int ktd_maxentry = KTD_MAXENTRY;	/* max # of entries in the queue */
int ktd_timeout = KTD_TIMEOUT;		/* timeout in seconds */
int ktd_delayqcnt = KTD_DELAYQCNT;	/* # of entries allowed to delay */
int ktd_wakedelay = KTD_WAKEDELAY;	/* wakeup delay, in *ms* */
int ktd_intrwakdl = KTD_INTRWAKDL;	/* ditto, but when interactive */

kmutex_t ktrace_lock;
int ktrace_on;
static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq);
static pool_cache_t kte_cache;

static kauth_listener_t ktrace_listener;

static void
ktd_wakeup(struct ktr_desc *ktd)
{

	callout_stop(&ktd->ktd_wakch);
	cv_signal(&ktd->ktd_cv);
}

static void
ktd_callout(void *arg)
{

	mutex_enter(&ktrace_lock);
	ktd_wakeup(arg);
	mutex_exit(&ktrace_lock);
}

static void
ktd_logerrl(struct ktr_desc *ktd, int error)
{

	ktd->ktd_error |= error;
	ktd->ktd_errcnt++;
}

#if 0
static void
ktd_logerr(struct proc *p, int error)
{
	struct ktr_desc *ktd;

	KASSERT(mutex_owned(&ktrace_lock));

	ktd = p->p_tracep;
	if (ktd == NULL)
		return;

	ktd_logerrl(ktd, error);
}
#endif

static int
ktrace_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;
	enum kauth_process_req req;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	if (action != KAUTH_PROCESS_KTRACE)
		return result;

	req = (enum kauth_process_req)(uintptr_t)arg1;

	/* Privileged; secmodel should handle these. */
	if (req == KAUTH_REQ_PROCESS_KTRACE_PERSISTENT)
		return result;

	if ((p->p_traceflag & KTRFAC_PERSISTENT) ||
	    (p->p_flag & PK_SUGID))
		return result;

	if (kauth_cred_geteuid(cred) == kauth_cred_getuid(p->p_cred) &&
	    kauth_cred_getuid(cred) == kauth_cred_getsvuid(p->p_cred) &&
	    kauth_cred_getgid(cred) == kauth_cred_getgid(p->p_cred) &&
	    kauth_cred_getgid(cred) == kauth_cred_getsvgid(p->p_cred))
		result = KAUTH_RESULT_ALLOW;

	return result;
}

/*
 * Initialise the ktrace system.
 */
void
ktrinit(void)
{

	mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE);
	kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0,
	    "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL);

	ktrace_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    ktrace_listener_cb, NULL);
}
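
/*
 * Reference counting, sketched from the code below: every traced
 * process holds a reference on its ktr_desc via p->p_tracep, as does
 * any code temporarily operating on the descriptor.  The global
 * ktrace_on is incremented and decremented alongside those
 * references, mirroring the total number outstanding across all
 * descriptors.  When the last reference on a descriptor is dropped,
 * it is marked KTDF_DONE and the writer thread is signalled so that
 * it can flush and tear the descriptor down.
 */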

/*
 * Release a reference.  Called with ktrace_lock held.
 */
static void
ktdrel(struct ktr_desc *ktd)
{

	KASSERT(mutex_owned(&ktrace_lock));

	KDASSERT(ktd->ktd_ref != 0);
	KASSERT(ktd->ktd_ref > 0);
	KASSERT(ktrace_on > 0);
	ktrace_on--;
	if (--ktd->ktd_ref <= 0) {
		ktd->ktd_flags |= KTDF_DONE;
		cv_signal(&ktd->ktd_cv);
	}
}

static void
ktdref(struct ktr_desc *ktd)
{

	KASSERT(mutex_owned(&ktrace_lock));

	ktd->ktd_ref++;
	ktrace_on++;
}

static struct ktr_desc *
ktd_lookup(file_t *fp)
{
	struct ktr_desc *ktd;

	KASSERT(mutex_owned(&ktrace_lock));

	for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL;
	    ktd = TAILQ_NEXT(ktd, ktd_list)) {
		if (ktrsamefile(ktd->ktd_fp, fp)) {
			ktdref(ktd);
			break;
		}
	}

	return (ktd);
}
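
/*
 * Queue a trace record on the process' trace descriptor and wake the
 * writer thread as required.  With KTA_WAITOK, the caller is prepared
 * to sleep; once the queue is half full we synchronize with the
 * writer so that the traced process cannot outrun it.  Otherwise the
 * wakeup is deferred through the ktd_wakch callout so that small
 * records are batched.  The entry is consumed: it is either queued or
 * freed here.
 */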

void
ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags)
{
	struct proc *p = l->l_proc;
	struct ktr_desc *ktd;
#ifdef DEBUG
	struct timeval t1, t2;
#endif

	mutex_enter(&ktrace_lock);

	if (p->p_traceflag & KTRFAC_TRC_EMUL) {
		/* Add emulation trace before first entry for this process */
		p->p_traceflag &= ~KTRFAC_TRC_EMUL;
		mutex_exit(&ktrace_lock);
		ktrexit(l);
		ktremul();
		(void)ktrenter(l);
		mutex_enter(&ktrace_lock);
	}

	/* Tracing may have been cancelled. */
	ktd = p->p_tracep;
	if (ktd == NULL)
		goto freekte;

	/*
	 * Bump the reference count so that the object will remain while
	 * we are here.  Note that the trace is controlled by another
	 * process.
	 */
	ktdref(ktd);

	if (ktd->ktd_flags & KTDF_DONE)
		goto relktd;

	if (ktd->ktd_qcount > ktd_maxentry) {
		ktd_logerrl(ktd, KTDE_ENOSPC);
		goto relktd;
	}
	TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list);
	ktd->ktd_qcount++;
	if (ktd->ktd_flags & KTDF_BLOCKING)
		goto skip_sync;

	if (flags & KTA_WAITOK &&
	    (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT ||
	    ktd->ktd_qcount > ktd_maxentry >> 1))
		/*
		 * Sync with the writer thread, since we're queueing a
		 * rather large request or many requests are pending.
		 */
		do {
			ktd->ktd_flags |= KTDF_WAIT;
			ktd_wakeup(ktd);
#ifdef DEBUG
			getmicrouptime(&t1);
#endif
			if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock,
			    ktd_timeout * hz) != 0) {
				ktd->ktd_flags |= KTDF_BLOCKING;
				/*
				 * The writer thread may be blocked for
				 * some reason; don't stall the traced
				 * process forever.
				 */
				log(LOG_NOTICE, "ktrace timeout\n");
				break;
			}
#ifdef DEBUG
			getmicrouptime(&t2);
			timersub(&t2, &t1, &t2);
			if (t2.tv_sec > 0)
				log(LOG_NOTICE,
				    "ktrace long wait: %lld.%06ld\n",
				    (long long)t2.tv_sec, (long)t2.tv_usec);
#endif
		} while (p->p_tracep == ktd &&
		    (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT);
	else {
		/* Schedule delayed wakeup */
		if (ktd->ktd_qcount > ktd->ktd_delayqcnt)
			ktd_wakeup(ktd);	/* Wakeup now */
		else if (!callout_pending(&ktd->ktd_wakch))
			callout_reset(&ktd->ktd_wakch,
			    ktd->ktd_flags & KTDF_INTERACTIVE ?
			    ktd->ktd_intrwakdl : ktd->ktd_wakedelay,
			    ktd_callout, ktd);
	}

skip_sync:
	ktdrel(ktd);
	mutex_exit(&ktrace_lock);
	ktrexit(l);
	return;

relktd:
	ktdrel(ktd);

freekte:
	mutex_exit(&ktrace_lock);
	ktefree(kte);
	ktrexit(l);
}

static void
ktefree(struct ktrace_entry *kte)
{

	if (kte->kte_buf != kte->kte_space)
		kmem_free(kte->kte_buf, kte->kte_bufsz);
	pool_cache_put(kte_cache, kte);
}

/*
 * "deep" compare of two files for the purposes of clearing a trace.
 * Returns true if they're the same open file, or if they point at the
 * same underlying vnode/socket.
 */
static int
ktrsamefile(file_t *f1, file_t *f2)
{

	return ((f1 == f2) ||
	    ((f1 != NULL) && (f2 != NULL) &&
	    (f1->f_type == f2->f_type) &&
	    (f1->f_data == f2->f_data)));
}

void
ktrderef(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	p->p_traceflag = 0;
	if (ktd == NULL)
		return;
	p->p_tracep = NULL;

	cv_broadcast(&ktd->ktd_sync_cv);
	ktdrel(ktd);
}

void
ktradref(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	ktdref(ktd);
}

static int
ktrderefall(struct ktr_desc *ktd, int auth)
{
	lwp_t *curl = curlwp;
	struct proc *p;
	int error = 0;

	mutex_enter(&proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		if (p->p_tracep != ktd)
			continue;
		mutex_enter(p->p_lock);
		mutex_enter(&ktrace_lock);
		if (p->p_tracep == ktd) {
			if (!auth || ktrcanset(curl, p))
				ktrderef(p);
			else
				error = SET_ERROR(EPERM);
		}
		mutex_exit(&ktrace_lock);
		mutex_exit(p->p_lock);
	}
	mutex_exit(&proc_lock);

	return error;
}

int
ktealloc(struct ktrace_entry **ktep, void **bufp, lwp_t *l, int type,
    size_t sz)
{
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_header *kth;
	void *buf;

	if (ktrenter(l))
		return SET_ERROR(EAGAIN);

	kte = pool_cache_get(kte_cache, PR_WAITOK);
	if (sz > sizeof(kte->kte_space)) {
		buf = kmem_alloc(sz, KM_SLEEP);
	} else
		buf = kte->kte_space;

	kte->kte_bufsz = sz;
	kte->kte_buf = buf;

	kth = &kte->kte_kth;
	(void)memset(kth, 0, sizeof(*kth));
	kth->ktr_len = sz;
	kth->ktr_type = type;
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
	kth->ktr_version = KTRFAC_VERSION(p->p_traceflag);
	kth->ktr_lid = l->l_lid;
	nanotime(&kth->ktr_ts);

	*ktep = kte;
	*bufp = buf;

	return 0;
}

void
ktesethdrlen(struct ktrace_entry *kte, size_t l)
{
	kte->kte_kth.ktr_len = l;
}
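
/*
 * The ktr_* emitters below all follow the same pattern: check
 * KTRPOINT() for the facility, allocate an entry with ktealloc(),
 * fill in the type-specific payload, and queue it with ktraddentry().
 */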

void
ktr_syscall(register_t code, const register_t args[], int narg)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_syscall *ktp;
	register_t *argp;
	size_t len;
	u_int i;

	if (!KTRPOINT(p, KTR_SYSCALL))
		return;

	len = sizeof(struct ktr_syscall) + narg * sizeof argp[0];

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len))
		return;

	ktp->ktr_code = code;
	ktp->ktr_argsize = narg * sizeof argp[0];
	argp = (register_t *)(ktp + 1);
	for (i = 0; i < narg; i++)
		*argp++ = args[i];

	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_sysret(register_t code, int error, register_t *retval)
{
	lwp_t *l = curlwp;
	struct ktrace_entry *kte;
	struct ktr_sysret *ktp;

	if (!KTRPOINT(l->l_proc, KTR_SYSRET))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET,
	    sizeof(struct ktr_sysret)))
		return;

	ktp->ktr_code = code;
	ktp->ktr_eosys = 0;		/* XXX unused */
	ktp->ktr_error = error;
	ktp->ktr_retval = retval && error == 0 ? retval[0] : 0;
	ktp->ktr_retval_1 = retval && error == 0 ? retval[1] : 0;

	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_namei(const char *path, size_t pathlen)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_NAMEI))
		return;

	ktr_kmem(l, KTR_NAMEI, path, pathlen);
}

void
ktr_namei2(const char *eroot, size_t erootlen,
    const char *path, size_t pathlen)
{
	lwp_t *l = curlwp;
	struct ktrace_entry *kte;
	void *buf;

	if (!KTRPOINT(l->l_proc, KTR_NAMEI))
		return;

	if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen))
		return;
	memcpy(buf, eroot, erootlen);
	buf = (char *)buf + erootlen;
	memcpy(buf, path, pathlen);
	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_emul(void)
{
	lwp_t *l = curlwp;
	const char *emul = l->l_proc->p_emul->e_name;

	if (!KTRPOINT(l->l_proc, KTR_EMUL))
		return;

	ktr_kmem(l, KTR_EMUL, emul, strlen(emul));
}

void
ktr_execarg(const void *bf, size_t len)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG))
		return;

	ktr_kmem(l, KTR_EXEC_ARG, bf, len);
}

void
ktr_execenv(const void *bf, size_t len)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV))
		return;

	ktr_kmem(l, KTR_EXEC_ENV, bf, len);
}

void
ktr_execfd(int fd, u_int dtype)
{
	struct ktrace_entry *kte;
	struct ktr_execfd* ktp;

	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_FD))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_EXEC_FD, sizeof(*ktp)))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_dtype = dtype;
	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_sigmask(int how, const sigset_t *nset, const sigset_t *oset,
    const sigset_t *rset)
{
	struct ktrace_entry *kte;
	struct ktr_sigmask *ktp;
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_SIGMASK))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SIGMASK, sizeof(*ktp)))
		return;

	ktp->ktr_how = how;
	ktp->ktr_nset = *nset;
	ktp->ktr_oset = *oset;
	ktp->ktr_rset = *rset;
	ktraddentry(l, kte, KTA_WAITOK);
}

static void
ktr_kmem(lwp_t *l, int type, const void *bf, size_t len)
{
	struct ktrace_entry *kte;
	void *buf;

	if (ktealloc(&kte, &buf, l, type, len))
		return;
	memcpy(buf, bf, len);
	ktraddentry(l, kte, KTA_WAITOK);
}
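
/*
 * Record generic I/O: copy the user data described by iov into trace
 * entries of at most PAGE_SIZE bytes each (including the ktr_genio
 * header), looping until resid is drained.  Between chunks we allow
 * preemption so that tracing a large transfer doesn't hog the CPU.
 */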

static void
ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_genio *ktp;
	size_t resid = len, cnt, buflen;
	char *cp;

next:
	buflen = uimin(PAGE_SIZE, resid + sizeof(struct ktr_genio));

	if (ktealloc(&kte, (void *)&ktp, l, KTR_GENIO, buflen))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	cp = (void *)(ktp + 1);
	buflen -= sizeof(struct ktr_genio);
	kte->kte_kth.ktr_len = sizeof(struct ktr_genio);

	while (buflen > 0) {
		cnt = uimin(iov->iov_len, buflen);
		if (copyin(iov->iov_base, cp, cnt) != 0)
			goto out;
		kte->kte_kth.ktr_len += cnt;
		cp += cnt;
		buflen -= cnt;
		resid -= cnt;
		iov->iov_len -= cnt;
		if (iov->iov_len == 0)
			iov++;
		else
			iov->iov_base = (char *)iov->iov_base + cnt;
	}

	/*
	 * Don't push too many entries at once, as that can cause a
	 * kmem map shortage.
	 */
	ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
	if (resid > 0) {
		if (preempt_needed()) {
			(void)ktrenter(l);
			preempt();
			ktrexit(l);
		}

		goto next;
	}

	return;

out:
	ktefree(kte);
	ktrexit(l);
}

void
ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error)
{
	lwp_t *l = curlwp;
	struct iovec iov;

	if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0)
		return;
	iov.iov_base = __UNCONST(addr);
	iov.iov_len = len;
	ktr_io(l, fd, rw, &iov, len);
}

void
ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0)
		return;
	ktr_io(l, fd, rw, iov, len);
}

void
ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error)
{
	lwp_t *l = curlwp;
	struct iovec iov;

	if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0)
		return;
	iov.iov_base = __UNCONST(addr);
	iov.iov_len = len;
	ktr_io(l, fd, rw, &iov, len);
}

void
ktr_psig(int sig, sig_t action, const sigset_t *mask,
    const ksiginfo_t *ksi)
{
	struct ktrace_entry *kte;
	lwp_t *l = curlwp;
	struct {
		struct ktr_psig	kp;
		siginfo_t	si;
	} *kbuf;

	if (!KTRPOINT(l->l_proc, KTR_PSIG))
		return;

	if (ktealloc(&kte, (void *)&kbuf, l, KTR_PSIG, sizeof(*kbuf)))
		return;

	memset(&kbuf->kp, 0, sizeof(kbuf->kp));
	kbuf->kp.signo = (char)sig;
	kbuf->kp.action = action;
	kbuf->kp.mask = *mask;

	if (ksi) {
		kbuf->kp.code = KSI_TRAPCODE(ksi);
		(void)memset(&kbuf->si, 0, sizeof(kbuf->si));
		kbuf->si._info = ksi->ksi_info;
		kte->kte_kth.ktr_len = sizeof(*kbuf);
	} else {
		kbuf->kp.code = 0;
		kte->kte_kth.ktr_len = sizeof(struct ktr_psig);
	}

	ktraddentry(l, kte, KTA_WAITOK);
}
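
/*
 * Context-switch tracing works in two halves: on the way out we only
 * stash a timestamp in l->l_ktrcsw and set LP_KTRCSW, because it is
 * not safe to allocate an entry here; on the way back in we emit the
 * deferred "sleep" record using the saved timestamp, followed by the
 * "resume" record.
 */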

void
ktr_csw(int out, int user, const struct syncobj *syncobj)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_csw *kc;

	if (!KTRPOINT(p, KTR_CSW))
		return;

	/*
	 * Don't record context switches resulting from blocking on
	 * locks; the results are not useful, and the mutex may be in a
	 * softint, which would lead us to ktealloc in softint context,
	 * which is forbidden.
	 */
	if (syncobj == &mutex_syncobj || syncobj == &rw_syncobj)
		return;
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	/*
	 * We can't sleep if we're already on our way to sleep (if the
	 * original wakeup condition is met while we sleep here, we
	 * would hang).
	 *
	 * XXX This is not ideal: it would be better to maintain a pool
	 * of ktes and actually push this to the kthread when context
	 * switch happens, however given the points where we are called
	 * from that is difficult to do.
	 */
	if (out) {
		if (ktrenter(l))
			return;

		nanotime(&l->l_ktrcsw);
		l->l_pflag |= LP_KTRCSW;
		if (user)
			l->l_pflag |= LP_KTRCSWUSER;
		else
			l->l_pflag &= ~LP_KTRCSWUSER;

		ktrexit(l);
		return;
	}

	/*
	 * On the way back in, we need to record twice: once for entry, and
	 * once for exit.
	 */
	if ((l->l_pflag & LP_KTRCSW) != 0) {
		struct timespec *ts;
		l->l_pflag &= ~LP_KTRCSW;

		if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
			return;

		kc->out = 1;
		kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0);

		ts = &l->l_ktrcsw;
		switch (KTRFAC_VERSION(p->p_traceflag)) {
		case 0:
			kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000;
			break;
		case 1:
			kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec;
			break;
		case 2:
			kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec;
			break;
		default:
			break;
		}

		ktraddentry(l, kte, KTA_WAITOK);
	}

	if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
		return;

	kc->out = 0;
	kc->user = user;

	ktraddentry(l, kte, KTA_WAITOK);
}

bool
ktr_point(int fac_bit)
{
	return curlwp->l_proc->p_traceflag & fac_bit;
}

int
ktruser(const char *id, void *addr, size_t len, int ustr)
{
	struct ktrace_entry *kte;
	struct ktr_user *ktp;
	lwp_t *l = curlwp;
	void *user_dta;
	int error;

	if (!KTRPOINT(l->l_proc, KTR_USER))
		return 0;

	if (len > KTR_USER_MAXLEN)
		return SET_ERROR(ENOSPC);

	error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len);
	if (error != 0)
		return error;

	if (ustr) {
		if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0)
			ktp->ktr_id[0] = '\0';
	} else
		strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN);
	ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0';

	user_dta = (void *)(ktp + 1);
	if ((error = copyin(addr, user_dta, len)) != 0)
		kte->kte_kth.ktr_len = 0;

	ktraddentry(l, kte, KTA_WAITOK);
	return error;
}

void
ktr_kuser(const char *id, const void *addr, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_user *ktp;
	lwp_t *l = curlwp;
	int error;

	if (!KTRPOINT(l->l_proc, KTR_USER))
		return;

	if (len > KTR_USER_MAXLEN)
		return;

	error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len);
	if (error != 0)
		return;

	strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN - 1);
	ktp->ktr_id[KTR_USER_MAXIDLEN - 1] = '\0';

	memcpy(ktp + 1, addr, len);

	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_mib(const int *name, u_int namelen)
{
	struct ktrace_entry *kte;
	int *namep;
	size_t size;
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_MIB))
		return;

	size = namelen * sizeof(*name);

	if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size))
		return;

	(void)memcpy(namep, name, namelen * sizeof(*name));

	ktraddentry(l, kte, KTA_WAITOK);
}

/* Interface and common routines */
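
/*
 * Common handler behind the tracing system calls (used by
 * sys_fktrace() below, and by the ktrace(2) entry point elsewhere):
 * look up, or for KTROP_SET create, the ktr_desc for the output file,
 * spawning its writer thread, and then apply the requested operation
 * to the target process, process group, or their descendants.
 */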

int
ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t **fpp)
{
	struct proc *p;
	struct pgrp *pg;
	struct ktr_desc *ktd = NULL, *nktd;
	file_t *fp = *fpp;
	int ret = 0;
	int error = 0;
	int descend;

	descend = ops & KTRFLAG_DESCEND;
	facs = facs & ~((unsigned) KTRFAC_PERSISTENT);

	(void)ktrenter(curl);

	switch (KTROP(ops)) {

	case KTROP_CLEARFILE:
		/*
		 * Clear all uses of the tracefile
		 */
		mutex_enter(&ktrace_lock);
		ktd = ktd_lookup(fp);
		mutex_exit(&ktrace_lock);
		if (ktd == NULL)
			goto done;
		error = ktrderefall(ktd, 1);
		goto done;

	case KTROP_SET:
		mutex_enter(&ktrace_lock);
		ktd = ktd_lookup(fp);
		mutex_exit(&ktrace_lock);
		if (ktd == NULL) {
			nktd = kmem_alloc(sizeof(*nktd), KM_SLEEP);
			TAILQ_INIT(&nktd->ktd_queue);
			callout_init(&nktd->ktd_wakch, CALLOUT_MPSAFE);
			cv_init(&nktd->ktd_cv, "ktrwait");
			cv_init(&nktd->ktd_sync_cv, "ktrsync");
			nktd->ktd_flags = 0;
			nktd->ktd_qcount = 0;
			nktd->ktd_error = 0;
			nktd->ktd_errcnt = 0;
			nktd->ktd_delayqcnt = ktd_delayqcnt;
			nktd->ktd_wakedelay = mstohz(ktd_wakedelay);
			nktd->ktd_intrwakdl = mstohz(ktd_intrwakdl);
			nktd->ktd_ref = 0;
			nktd->ktd_fp = fp;
			mutex_enter(&ktrace_lock);
			ktdref(nktd);
			mutex_exit(&ktrace_lock);

			/*
			 * XXX: not correct.  needs a way to detect
			 * whether ktruss or ktrace.
			 */
			if (fp->f_type == DTYPE_PIPE)
				nktd->ktd_flags |= KTDF_INTERACTIVE;

			mutex_enter(&fp->f_lock);
			fp->f_count++;
			mutex_exit(&fp->f_lock);
			error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
			    ktrace_thread, nktd, &nktd->ktd_lwp, "ktrace");
			if (error != 0) {
				kmem_free(nktd, sizeof(*nktd));
				nktd = NULL;
				mutex_enter(&fp->f_lock);
				fp->f_count--;
				mutex_exit(&fp->f_lock);
				goto done;
			}

			mutex_enter(&ktrace_lock);
			ktd = ktd_lookup(fp);
			if (ktd != NULL) {
				ktdrel(nktd);
				nktd = NULL;
			} else {
				TAILQ_INSERT_TAIL(&ktdq, nktd, ktd_list);
				ktd = nktd;
			}
			mutex_exit(&ktrace_lock);
		}
		break;

	case KTROP_CLEAR:
		break;
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = SET_ERROR(EINVAL);
		*fpp = NULL;
		goto done;
	}

	/*
	 * do it
	 */
	mutex_enter(&proc_lock);
	if (pid < 0) {
		/*
		 * by process group
		 */
		if (pid == INT_MIN)
			pg = NULL;
		else
			pg = pgrp_find(-pid);
		if (pg == NULL)
			error = SET_ERROR(ESRCH);
		else {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				if (descend)
					ret |= ktrsetchildren(curl, p, ops,
					    facs, ktd);
				else
					ret |= ktrops(curl, p, ops, facs,
					    ktd);
			}
		}

	} else {
		/*
		 * by pid
		 */
		p = proc_find(pid);
		if (p == NULL)
			error = SET_ERROR(ESRCH);
		else if (descend)
			ret |= ktrsetchildren(curl, p, ops, facs, ktd);
		else
			ret |= ktrops(curl, p, ops, facs, ktd);
	}
	mutex_exit(&proc_lock);
	if (error == 0 && !ret)
		error = SET_ERROR(EPERM);
	*fpp = NULL;
done:
	if (ktd != NULL) {
		mutex_enter(&ktrace_lock);
		if (error != 0) {
			/*
			 * Wake up the thread so that it can die if we
			 * can't trace any process.
			 */
			ktd_wakeup(ktd);
		}
		if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE)
			ktdrel(ktd);
		mutex_exit(&ktrace_lock);
	}
	ktrexit(curl);
	return (error);
}
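
/*
 * Example (userland, a minimal sketch): trace the system calls of the
 * calling process into "ktrace.out" --
 *
 *	int fd = open("ktrace.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	fktrace(fd, KTROP_SET, KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *	...
 *	fktrace(fd, KTROP_CLEAR, KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 */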

/*
 * fktrace system call
 */
/* ARGSUSED */
int
sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */
	file_t *fp;
	int error, fd;

	fd = SCARG(uap, fd);
	if ((fp = fd_getfile(fd)) == NULL)
		return SET_ERROR(EBADF);
	if ((fp->f_flag & FWRITE) == 0)
		error = SET_ERROR(EBADF);
	else
		error = ktrace_common(l, SCARG(uap, ops),
		    SCARG(uap, facs), SCARG(uap, pid), &fp);
	fd_putfile(fd);
	return error;
}

static int
ktrops(lwp_t *curl, struct proc *p, int ops, int facs,
    struct ktr_desc *ktd)
{
	int vers = ops & KTRFAC_VER_MASK;
	int error = 0;

	mutex_enter(p->p_lock);
	mutex_enter(&ktrace_lock);

	if (!ktrcanset(curl, p))
		goto out;

	switch (vers) {
	case KTRFACv0:
	case KTRFACv1:
	case KTRFACv2:
		break;
	default:
		error = SET_ERROR(EINVAL);
		goto out;
	}

	if (KTROP(ops) == KTROP_SET) {
		if (p->p_tracep != ktd) {
			/*
			 * if trace file already in use, relinquish
			 */
			ktrderef(p);
			p->p_tracep = ktd;
			ktradref(p);
		}
		p->p_traceflag |= facs;
		if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE,
		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL,
		    NULL) == 0)
			p->p_traceflag |= KTRFAC_PERSISTENT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrderef(p);
		}
	}

	if (p->p_traceflag)
		p->p_traceflag |= vers;
	/*
	 * Emit an emulation record every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		p->p_traceflag |= KTRFAC_TRC_EMUL;

	p->p_trace_enabled = trace_is_enabled(p);
#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

out:
	mutex_exit(&ktrace_lock);
	mutex_exit(p->p_lock);

	return error ? 0 : 1;
}
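
/*
 * Apply ktrops() to top and every process below it in the process
 * tree, using an iterative preorder walk so that no recursion is
 * needed.
 */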

static int
ktrsetchildren(lwp_t *curl, struct proc *top, int ops, int facs,
    struct ktr_desc *ktd)
{
	struct proc *p;
	int ret = 0;

	KASSERT(mutex_owned(&proc_lock));

	p = top;
	for (;;) {
		ret |= ktrops(curl, p, ops, facs, ktd);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL) {
			p = LIST_FIRST(&p->p_children);
			continue;
		}
		for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktrwrite(struct ktr_desc *ktd, struct ktrace_entry *kte)
{
	size_t hlen;
	struct uio auio;
	struct iovec aiov[64], *iov;
	struct ktrace_entry *top = kte;
	struct ktr_header *kth;
	file_t *fp = ktd->ktd_fp;
	int error;
next:
	auio.uio_iov = iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = 0;
	auio.uio_iovcnt = 0;
	UIO_SETUP_SYSSPACE(&auio);
	do {
		struct timespec ts;
		lwpid_t lid;
		kth = &kte->kte_kth;

		hlen = sizeof(struct ktr_header);
		switch (kth->ktr_version) {
		case 0:
			ts = kth->ktr_time;

			kth->ktr_otv.tv_sec = ts.tv_sec;
			kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
			kth->ktr_unused = NULL;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		case 1:
			ts = kth->ktr_time;
			lid = kth->ktr_lid;

			kth->ktr_ots.tv_sec = ts.tv_sec;
			kth->ktr_ots.tv_nsec = ts.tv_nsec;
			kth->ktr_olid = lid;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		}
		iov->iov_base = (void *)kth;
		iov++->iov_len = hlen;
		auio.uio_resid += hlen;
		auio.uio_iovcnt++;
		if (kth->ktr_len > 0) {
			iov->iov_base = kte->kte_buf;
			iov++->iov_len = kth->ktr_len;
			auio.uio_resid += kth->ktr_len;
			auio.uio_iovcnt++;
		}
	} while ((kte = TAILQ_NEXT(kte, kte_list)) != NULL &&
	    auio.uio_iovcnt < sizeof(aiov) / sizeof(aiov[0]) - 1);

again:
	error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
	    fp->f_cred, FOF_UPDATE_OFFSET);
	switch (error) {

	case 0:
		if (auio.uio_resid > 0)
			goto again;
		if (kte != NULL)
			goto next;
		break;

	case EWOULDBLOCK:
		kpause("ktrzzz", false, 1, NULL);
		goto again;

	default:
		/*
		 * If an error is encountered, give up tracing on this
		 * vnode.  Don't report EPIPE, as it can easily happen
		 * with fktrace()/ktruss.
		 */
#ifndef DEBUG
		if (error != EPIPE)
#endif
			log(LOG_NOTICE,
			    "ktrace write failed, errno %d, tracing stopped\n",
			    error);
		(void)ktrderefall(ktd, 0);
	}

	while ((kte = top) != NULL) {
		top = TAILQ_NEXT(top, kte_list);
		ktefree(kte);
	}
}
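
/*
 * Per-descriptor writer thread: sleep until entries are queued or the
 * last reference goes away, grab the whole queue in one go, write it
 * out with ktrwrite(), and repeat; on exit, unlink the descriptor,
 * close the file and free everything.
 */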

static void
ktrace_thread(void *arg)
{
	struct ktr_desc *ktd = arg;
	file_t *fp = ktd->ktd_fp;
	struct ktrace_entry *kte;
	int ktrerr, errcnt;

	mutex_enter(&ktrace_lock);
	for (;;) {
		kte = TAILQ_FIRST(&ktd->ktd_queue);
		if (kte == NULL) {
			if (ktd->ktd_flags & KTDF_WAIT) {
				ktd->ktd_flags &= ~(KTDF_WAIT | KTDF_BLOCKING);
				cv_broadcast(&ktd->ktd_sync_cv);
			}
			if (ktd->ktd_ref == 0)
				break;
			cv_wait(&ktd->ktd_cv, &ktrace_lock);
			continue;
		}
		TAILQ_INIT(&ktd->ktd_queue);
		ktd->ktd_qcount = 0;
		ktrerr = ktd->ktd_error;
		errcnt = ktd->ktd_errcnt;
		ktd->ktd_error = ktd->ktd_errcnt = 0;
		mutex_exit(&ktrace_lock);

		if (ktrerr) {
			log(LOG_NOTICE,
			    "ktrace failed, fp %p, error 0x%x, total %d\n",
			    fp, ktrerr, errcnt);
		}
		ktrwrite(ktd, kte);
		mutex_enter(&ktrace_lock);
	}

	if (ktd_lookup(ktd->ktd_fp) == ktd) {
		TAILQ_REMOVE(&ktdq, ktd, ktd_list);
	} else {
		/* nothing, collision in KTROP_SET */
	}

	callout_halt(&ktd->ktd_wakch, &ktrace_lock);
	callout_destroy(&ktd->ktd_wakch);
	mutex_exit(&ktrace_lock);

	/*
	 * The ktrace file descriptor can't be watched (it is not
	 * visible to userspace), so no kqueue handling is needed here.
	 * XXX: The above is wrong, because the fktrace file descriptor
	 * is available in userland.
	 */
	closef(fp);

	cv_destroy(&ktd->ktd_sync_cv);
	cv_destroy(&ktd->ktd_cv);

	kmem_free(ktd, sizeof(*ktd));

	kthread_exit(0);
}

/*
 * Return true if the caller has permission to set the ktracing state
 * of the target.  Essentially, the target can't possess any more
 * permissions than the caller.  KTRFAC_PERSISTENT signifies that the
 * tracing will persist on sugid processes during exec; it is only
 * settable by a process with appropriate credentials.
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(lwp_t *calll, struct proc *targetp)
{
	KASSERT(mutex_owned(targetp->p_lock));
	KASSERT(mutex_owned(&ktrace_lock));

	if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE,
	    targetp, NULL, NULL, NULL) == 0)
		return (1);

	return (0);
}

/*
 * Append a user-defined entry to the ktrace records.
 */
int
sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *) label;
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */

	return ktruser(SCARG(uap, label), SCARG(uap, addr),
	    SCARG(uap, len), 1);
}
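
/*
 * Example (userland, a minimal sketch): emit an application-defined
 * record, displayed by kdump(1) under the given label --
 *
 *	struct mystate st = { ... };	(hypothetical application data)
 *	utrace("mystate", &st, sizeof(st));
 */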