/*	$NetBSD: kern_ktrace.c,v 1.53.2.9 2002/12/11 06:43:04 thorpej Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.5 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.53.2.9 2002/12/11 06:43:04 thorpej Exp $");

#include "opt_ktrace.h"
#include "opt_compat_mach.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>

#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

#ifdef COMPAT_MACH
#include <compat/mach/mach_types.h>
#include <compat/mach/mach_message.h>
#endif

#ifdef KTRACE

int	ktrace_common(struct proc *, int, int, int, struct file *);
void	ktrinitheader(struct ktr_header *, struct proc *, int);
int	ktrops(struct proc *, struct proc *, int, int, struct file *);
int	ktrsetchildren(struct proc *, struct proc *, int, int,
	    struct file *);
int	ktrwrite(struct proc *, struct ktr_header *);
int	ktrcanset(struct proc *, struct proc *);
int	ktrsamefile(struct file *, struct file *);

/*
 * "deep" compare of two files for the purposes of clearing a trace.
 * Returns true if they're the same open file, or if they point at the
 * same underlying vnode/socket.
 */

int
ktrsamefile(f1, f2)
	struct file *f1;
	struct file *f2;
{
	return ((f1 == f2) ||
	    ((f1 != NULL) && (f2 != NULL) &&
	    (f1->f_type == f2->f_type) &&
	    (f1->f_data == f2->f_data)));
}

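/*
 * Drop the current process's reference to its trace file: clear
 * p_traceflag and, if a trace file is attached, close it and detach
 * it from the process.
 */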
void
ktrderef(p)
	struct proc *p;
{
	struct file *fp = p->p_tracep;
	p->p_traceflag = 0;
	if (fp == NULL)
		return;
	FILE_USE(fp);

	/*
	 * The ktrace file descriptor is not visible to userspace and so
	 * cannot be watched; no kqueue handling is needed here.
	 */
	closef(fp, NULL);

	p->p_tracep = NULL;
}

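/*
 * Take an additional reference to the process's trace file.  The
 * caller must ensure p_tracep is non-NULL.
 */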
void
ktradref(p)
	struct proc *p;
{
	struct file *fp = p->p_tracep;

	fp->f_count++;
}

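/*
 * Fill in the common fields of a ktrace record header: record type,
 * timestamp, and the pid and command name of the traced process.
 */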
void
ktrinitheader(kth, p, type)
	struct ktr_header *kth;
	struct proc *p;
	int type;
{

	memset(kth, 0, sizeof(*kth));
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
}

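/*
 * Log a KTR_SYSCALL record: the (possibly emulation-specific) syscall
 * code and a copy of its arguments.
 */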
void
ktrsyscall(p, code, realcode, args)
	struct proc *p;
	register_t code;
	register_t realcode;
	register_t args[];
{
	struct ktr_header kth;
	struct ktr_syscall *ktp;
	register_t *argp;
	int argsize;
	size_t len;
	u_int i;

	argsize = p->p_emul->e_sysent[code].sy_narg * sizeof (register_t);
	len = sizeof(struct ktr_syscall) + argsize;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = realcode;
	ktp->ktr_argsize = argsize;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof(*argp)); i++)
		*argp++ = args[i];
	kth.ktr_buf = (caddr_t)ktp;
	kth.ktr_len = len;
	(void) ktrwrite(p, &kth);
	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

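/*
 * Log a KTR_SYSRET record describing the return from a system call:
 * syscall code, error number and (first) return value.
 */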
void
ktrsysret(p, code, error, retval)
	struct proc *p;
	register_t code;
	int error;
	register_t retval;
{
	struct ktr_header kth;
	struct ktr_sysret ktp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_eosys = 0;			/* XXX unused */
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = sizeof(struct ktr_sysret);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

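/*
 * Log a KTR_NAMEI record containing the pathname looked up by namei().
 */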
void
ktrnamei(p, path)
	struct proc *p;
	char *path;
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_NAMEI);
	kth.ktr_len = strlen(path);
	kth.ktr_buf = path;

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

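/*
 * Log a KTR_EMUL record with the name of the emulation the process is
 * currently running under.
 */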
void
ktremul(p)
	struct proc *p;
{
	struct ktr_header kth;
	const char *emul = p->p_emul->e_name;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_EMUL);
	kth.ktr_len = strlen(emul);
	kth.ktr_buf = (caddr_t)emul;

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

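/*
 * Log KTR_GENIO records for generic process I/O.  The user data is
 * copied in and written out in chunks of at most one page, yielding
 * the CPU between chunks if a reschedule is pending.
 */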
void
ktrgenio(p, fd, rw, iov, len, error)
	struct proc *p;
	int fd;
	enum uio_rw rw;
	struct iovec *iov;
	int len;
	int error;
{
	struct ktr_header kth;
	struct ktr_genio *ktp;
	caddr_t cp;
	int resid = len, cnt;
	int buflen;

	if (error)
		return;

	p->p_traceflag |= KTRFAC_ACTIVE;

	buflen = min(PAGE_SIZE, len + sizeof(struct ktr_genio));

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp = malloc(buflen, M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	kth.ktr_buf = (caddr_t)ktp;

	cp = (caddr_t)((char *)ktp + sizeof(struct ktr_genio));
	buflen -= sizeof(struct ktr_genio);

	while (resid > 0) {
#if 0 /* XXX NJWLWP */
		KDASSERT(p->p_cpu != NULL);
		KDASSERT(p->p_cpu == curcpu());
#endif
		/* XXX NJWLWP */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt(NULL);

		cnt = min(iov->iov_len, buflen);
		if (cnt > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, cnt))
			break;

		kth.ktr_len = cnt + sizeof(struct ktr_genio);

		if (__predict_false(ktrwrite(p, &kth) != 0))
			break;

		iov->iov_base = (caddr_t)iov->iov_base + cnt;
		iov->iov_len -= cnt;

		if (iov->iov_len == 0)
			iov++;

		resid -= cnt;
	}

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

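/*
 * Log a KTR_PSIG record describing a posted signal: signal number,
 * handler, blocked signal mask and signal-specific code.
 */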
void
ktrpsig(p, sig, action, mask, code)
	struct proc *p;
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_header kth;
	struct ktr_psig kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = *mask;
	kp.code = code;
	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = sizeof(struct ktr_psig);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

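/*
 * Log a KTR_CSW record for a context switch, noting whether the process
 * is being switched out or resumed and whether it was in user mode.
 */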
void
ktrcsw(p, out, user)
	struct proc *p;
	int out;
	int user;
{
	struct ktr_header kth;
	struct ktr_csw kc;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = sizeof(struct ktr_csw);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

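/*
 * Log a KTR_USER record on behalf of utrace(2): a user-supplied label
 * (copied in from userspace when 'ustr' is set) followed by up to 'len'
 * bytes of user data.
 */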
void
ktruser(p, id, addr, len, ustr)
	struct proc *p;
	const char *id;
	void *addr;
	size_t len;
	int ustr;
{
	struct ktr_header kth;
	struct ktr_user *ktp;
	caddr_t user_dta;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_USER);
	ktp = malloc(sizeof(struct ktr_user) + len, M_TEMP, M_WAITOK);
	if (ustr) {
		if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0)
			ktp->ktr_id[0] = '\0';
	} else
		strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN);
	ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0';

	user_dta = (caddr_t) ((char *)ktp + sizeof(struct ktr_user));
	if (copyin(addr, (void *) user_dta, len) != 0)
		len = 0;

	kth.ktr_buf = (void *)ktp;
	kth.ktr_len = sizeof(struct ktr_user) + len;
	(void) ktrwrite(p, &kth);

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

#ifdef COMPAT_MACH
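/*
 * Log a KTR_MMSG record containing a copy of a Mach message, for the
 * Mach binary compatibility code.
 */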
void
ktrmmsg(p, msgh, size)
	struct proc *p;
	const char *msgh;
	size_t size;
{
	struct ktr_header kth;
	struct ktr_mmsg *kp;
	int error;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_MMSG);

	kp = (struct ktr_mmsg *)malloc(size, M_TEMP, M_WAITOK);
	if ((error = copyin(msgh, kp, size)) != 0)
		size = 0;		/* Still log a message, but empty */

	kth.ktr_buf = (caddr_t)kp;
	kth.ktr_len = size;
	(void) ktrwrite(p, &kth);
	free(kp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}
#endif /* COMPAT_MACH */

/* Interface and common routines */

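/*
 * Guts of ktrace(2) and fktrace(2): clear all uses of a trace file, or
 * attach/detach the requested trace facilities to a process, a process
 * group, or (with KTRFLAG_DESCEND) an entire process subtree.
 */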
int
ktrace_common(curp, ops, facs, pid, fp)
	struct proc *curp;
	int ops;
	int facs;
	int pid;
	struct file *fp;
{
	int ret = 0;
	int error = 0;
	int one = 1;
	int descend;
	struct proc *p;
	struct pgrp *pg;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	descend = ops & KTRFLAG_DESCEND;
	facs = facs & ~((unsigned) KTRFAC_ROOT);

	/*
	 * Clear all uses of the tracefile
	 */
	if (KTROP(ops) == KTROP_CLEARFILE) {
		proclist_lock_read();
		for (p = LIST_FIRST(&allproc); p != NULL;
		     p = LIST_NEXT(p, p_list)) {
			if (ktrsamefile(p->p_tracep, fp)) {
				if (ktrcanset(curp, p))
					ktrderef(p);
				else
					error = EPERM;
			}
		}
		proclist_unlock_read();
		goto done;
	}

	/*
	 * Mark fp non-blocking, to avoid problems from possible deadlocks.
	 */

	if (fp != NULL) {
		fp->f_flag |= FNONBLOCK;
		(*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&one, curp);
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = LIST_FIRST(&pg->pg_members); p != NULL;
		     p = LIST_NEXT(p, p_pglist)) {
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, fp);
			else
				ret |= ktrops(curp, p, ops, facs, fp);
		}

	} else {
		/*
		 * by pid
		 */
		p = pfind(pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, fp);
		else
			ret |= ktrops(curp, p, ops, facs, fp);
	}
	if (!ret)
		error = EPERM;
done:
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * fktrace system call
 */
/* ARGSUSED */
int
sys_fktrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_fktrace_args /* {
		syscallarg(int) fd;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct proc *curp = l->l_proc;
	struct file *fp = NULL;
	struct filedesc *fdp = curp->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	return ktrace_common(curp, SCARG(uap, ops),
	    SCARG(uap, facs), SCARG(uap, pid), fp);
}

/*
 * ktrace system call
 */
/* ARGSUSED */
int
sys_ktrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_ktrace_args /* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct proc *curp = l->l_proc;
	struct vnode *vp = NULL;
	struct file *fp = NULL;
	int fd;
	int ops = SCARG(uap, ops);
	int error = 0;
	struct nameidata nd;

	ops = KTROP(ops) | (ops & KTRFLAG_DESCEND);

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
		    curp);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
		/*
		 * XXX This uses up a file descriptor slot in the
		 * tracing process for the duration of this syscall.
		 * This is not expected to be a problem.  If
		 * falloc(NULL, ...) DTRT we could skip that part, but
		 * that would require changing its interface to allow
		 * the caller to pass in a ucred..
		 *
		 * This will FILE_USE the fp it returns, if any.
		 * Keep it in use until we return.
		 */
		if ((error = falloc(curp, &fp, &fd)) != 0)
			goto done;

		fp->f_flag = FWRITE|FAPPEND;
		fp->f_type = DTYPE_VNODE;
		fp->f_ops = &vnops;
		fp->f_data = (caddr_t)vp;
		FILE_SET_MATURE(fp);
		vp = NULL;
	}
	error = ktrace_common(curp, SCARG(uap, ops), SCARG(uap, facs),
	    SCARG(uap, pid), fp);
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	if (fp != NULL) {
		FILE_UNUSE(fp, curp);	/* release file */
		fdrelease(curp, fd);	/* release fd table slot */
	}
	return (error);
}

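/*
 * Apply a single set/clear operation to one process: attach the trace
 * file and turn on the requested facilities, or clear them and drop the
 * trace file when no facilities remain.  Returns 1 on success, 0 if the
 * caller may not change the target's tracing state.
 */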
int
ktrops(curp, p, ops, facs, fp)
	struct proc *curp;
	struct proc *p;
	int ops;
	int facs;
	struct file *fp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (KTROP(ops) == KTROP_SET) {
		if (p->p_tracep != fp) {
			/*
			 * if trace file already in use, relinquish
			 */
			ktrderef(p);
			p->p_tracep = fp;
			ktradref(p);
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrderef(p);
		}
	}

	/*
	 * Emit an emulation record every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p);
#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

	return (1);
}

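/*
 * Apply ktrops() to 'top' and every process descended from it, walking
 * the process tree iteratively.  Returns nonzero if the operation
 * succeeded for at least one process.
 */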
int
ktrsetchildren(curp, top, ops, facs, fp)
	struct proc *curp;
	struct proc *top;
	int ops;
	int facs;
	struct file *fp;
{
	struct proc *p;
	int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, fp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL)
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

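/*
 * Write one ktrace record (header plus optional payload) to the trace
 * file of process p.  On persistent write errors, tracing is torn down
 * for every process that shares this trace file.
 */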
int
ktrwrite(p, kth)
	struct proc *p;
	struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	int error, tries;
	struct file *fp = p->p_tracep;

	if (fp == NULL)
		return 0;

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}

	FILE_USE(fp);

	tries = 0;
	do {
		error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
		    fp->f_cred, FOF_UPDATE_OFFSET);
		tries++;
		if (error == EWOULDBLOCK)
			yield();
	} while ((error == EWOULDBLOCK) && (tries < 3));
	FILE_UNUSE(fp, NULL);

	if (__predict_true(error == 0))
		return (0);
	/*
	 * If an error was encountered, give up tracing on this file.  Don't
	 * report EPIPE as this can easily happen with fktrace()/ktruss.
	 */
	if (error != EPIPE)
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n",
		    error);
	proclist_lock_read();
	for (p = LIST_FIRST(&allproc); p != NULL; p = LIST_NEXT(p, p_list)) {
		if (ktrsamefile(p->p_tracep, fp))
			ktrderef(p);
	}
	proclist_unlock_read();

	return (error);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(callp, targetp)
	struct proc *callp;
	struct proc *targetp;
{
	struct pcred *caller = callp->p_cred;
	struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
	     (targetp->p_flag & P_SUGID) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}
#endif	/* KTRACE */

/*
 * Record a user-defined entry in the ktrace log (utrace(2)).
 */
int
sys_utrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#ifdef KTRACE
	struct sys_utrace_args /* {
		syscallarg(const char *) label;
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;

	if (!KTRPOINT(p, KTR_USER))
		return (0);

	if (SCARG(uap, len) > KTR_USER_MAXLEN)
		return (EINVAL);

	ktruser(p, SCARG(uap, label), SCARG(uap, addr), SCARG(uap, len), 1);

	return (0);
#else /* !KTRACE */
	return ENOSYS;
#endif /* KTRACE */
}