/*	$NetBSD: kern_ktrace.c,v 1.68 2003/02/23 14:37:34 pk Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.5 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.68 2003/02/23 14:37:34 pk Exp $");

#include "opt_ktrace.h"
#include "opt_compat_mach.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>

#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

#ifdef KTRACE

int	ktrace_common(struct proc *, int, int, int, struct file *);
void	ktrinitheader(struct ktr_header *, struct proc *, int);
int	ktrops(struct proc *, struct proc *, int, int, struct file *);
int	ktrsetchildren(struct proc *, struct proc *, int, int,
	    struct file *);
int	ktrwrite(struct proc *, struct ktr_header *);
int	ktrcanset(struct proc *, struct proc *);
int	ktrsamefile(struct file *, struct file *);

/*
 * "deep" compare of two files for the purposes of clearing a trace.
 * Returns true if they're the same open file, or if they point at the
 * same underlying vnode/socket.
 */

int
ktrsamefile(f1, f2)
	struct file *f1;
	struct file *f2;
{
	return ((f1 == f2) ||
	    ((f1 != NULL) && (f2 != NULL) &&
	     (f1->f_type == f2->f_type) &&
	     (f1->f_data == f2->f_data)));
}

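/*
 * Stop tracing a process: clear its trace flags and drop its
 * reference to the trace file, if one is attached.
 */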
void
ktrderef(p)
	struct proc *p;
{
	struct file *fp = p->p_tracep;
	p->p_traceflag = 0;
	if (fp == NULL)
		return;
	simple_lock(&fp->f_slock);
	FILE_USE(fp);

	/*
	 * The ktrace file descriptor can't be watched (it is not visible
	 * to userspace), so no kqueue handling is needed here.
	 */
	closef(fp, NULL);

	p->p_tracep = NULL;
}

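/*
 * Take an additional reference on a process' trace file; the caller
 * must ensure p->p_tracep is set.
 */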
void
ktradref(p)
	struct proc *p;
{
	struct file *fp = p->p_tracep;

	fp->f_count++;
}

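/*
 * Initialize the common record header: record type, timestamp, pid
 * and command name of the traced process.
 */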
void
ktrinitheader(kth, p, type)
	struct ktr_header *kth;
	struct proc *p;
	int type;
{

	memset(kth, 0, sizeof(*kth));
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
}

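/*
 * Write a KTR_SYSCALL record containing the syscall code and a copy
 * of its arguments.
 */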
void
ktrsyscall(p, code, realcode, callp, args)
	struct proc *p;
	register_t code;
	register_t realcode;
	const struct sysent *callp;
	register_t args[];
{
	struct ktr_header kth;
	struct ktr_syscall *ktp;
	register_t *argp;
	int argsize;
	size_t len;
	u_int i;

	if (callp == NULL)
		callp = p->p_emul->e_sysent;

	argsize = callp[code].sy_narg * sizeof (register_t);
	len = sizeof(struct ktr_syscall) + argsize;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = realcode;
	ktp->ktr_argsize = argsize;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof(*argp)); i++)
		*argp++ = args[i];
	kth.ktr_buf = (caddr_t)ktp;
	kth.ktr_len = len;
	(void) ktrwrite(p, &kth);
	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

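/*
 * Write a KTR_SYSRET record: syscall code, error status and primary
 * return value.
 */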
void
ktrsysret(p, code, error, retval)
	struct proc *p;
	register_t code;
	int error;
	register_t retval;
{
	struct ktr_header kth;
	struct ktr_sysret ktp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_eosys = 0;			/* XXX unused */
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = sizeof(struct ktr_sysret);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

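/*
 * Write a KTR_NAMEI record containing the looked-up pathname.
 */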
void
ktrnamei(p, path)
	struct proc *p;
	char *path;
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_NAMEI);
	kth.ktr_len = strlen(path);
	kth.ktr_buf = path;

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

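/*
 * Write a KTR_EMUL record naming the emulation the process is
 * currently running under.
 */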
void
ktremul(p)
	struct proc *p;
{
	struct ktr_header kth;
	const char *emul = p->p_emul->e_name;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_EMUL);
	kth.ktr_len = strlen(emul);
	kth.ktr_buf = (caddr_t)emul;

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

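/*
 * Write KTR_GENIO records for a successful read or write: the file
 * descriptor, the transfer direction and the transferred data, copied
 * from the iovec in chunks of at most one page.
 */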
void
ktrgenio(p, fd, rw, iov, len, error)
	struct proc *p;
	int fd;
	enum uio_rw rw;
	struct iovec *iov;
	int len;
	int error;
{
	struct ktr_header kth;
	struct ktr_genio *ktp;
	caddr_t cp;
	int resid = len, cnt;
	int buflen;

	if (error)
		return;

	p->p_traceflag |= KTRFAC_ACTIVE;

	buflen = min(PAGE_SIZE, len + sizeof(struct ktr_genio));

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp = malloc(buflen, M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	kth.ktr_buf = (caddr_t)ktp;

	cp = (caddr_t)((char *)ktp + sizeof(struct ktr_genio));
	buflen -= sizeof(struct ktr_genio);

	while (resid > 0) {
#if 0 /* XXX NJWLWP */
		KDASSERT(p->p_cpu != NULL);
		KDASSERT(p->p_cpu == curcpu());
#endif
		/* XXX NJWLWP */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt(1);

		cnt = min(iov->iov_len, buflen);
		if (cnt > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, cnt))
			break;

		kth.ktr_len = cnt + sizeof(struct ktr_genio);

		if (__predict_false(ktrwrite(p, &kth) != 0))
			break;

		iov->iov_base = (caddr_t)iov->iov_base + cnt;
		iov->iov_len -= cnt;

		if (iov->iov_len == 0)
			iov++;

		resid -= cnt;
	}

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

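/*
 * Write a KTR_PSIG record describing a delivered signal: signal
 * number, handler, blocked mask and code.
 */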
void
ktrpsig(p, sig, action, mask, code)
	struct proc *p;
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_header kth;
	struct ktr_psig kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = *mask;
	kp.code = code;
	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = sizeof(struct ktr_psig);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

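/*
 * Write a KTR_CSW record for a context switch, using the out/user
 * flags supplied by the caller.
 */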
void
ktrcsw(p, out, user)
	struct proc *p;
	int out;
	int user;
{
	struct ktr_header kth;
	struct ktr_csw kc;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = sizeof(struct ktr_csw);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

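/*
 * Write a KTR_USER record: a caller-supplied identifier plus up to
 * 'len' bytes of data copied in from userspace.
 */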
void
ktruser(p, id, addr, len, ustr)
	struct proc *p;
	const char *id;
	void *addr;
	size_t len;
	int ustr;
{
	struct ktr_header kth;
	struct ktr_user *ktp;
	caddr_t user_dta;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_USER);
	ktp = malloc(sizeof(struct ktr_user) + len, M_TEMP, M_WAITOK);
	if (ustr) {
		if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0)
			ktp->ktr_id[0] = '\0';
	} else
		strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN);
	ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0';

	user_dta = (caddr_t)((char *)ktp + sizeof(struct ktr_user));
	if (copyin(addr, (void *)user_dta, len) != 0)
		len = 0;

	kth.ktr_buf = (void *)ktp;
	kth.ktr_len = sizeof(struct ktr_user) + len;
	(void) ktrwrite(p, &kth);

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

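/*
 * Write a KTR_MMSG record containing a Mach message, used by the
 * Mach binary compatibility code.
 */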
void
ktrmmsg(p, msgh, size)
	struct proc *p;
	const void *msgh;
	size_t size;
{
	struct ktr_header kth;
	struct ktr_mmsg *kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_MMSG);

	kp = (struct ktr_mmsg *)msgh;
	kth.ktr_buf = (caddr_t)kp;
	kth.ktr_len = size;
	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

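/*
 * Common implementation of ktrace(2) and fktrace(2): apply the
 * requested trace operation to the target process, process group or
 * process tree.
 */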
int
ktrace_common(curp, ops, facs, pid, fp)
	struct proc *curp;
	int ops;
	int facs;
	int pid;
	struct file *fp;
{
	int ret = 0;
	int error = 0;
	int one = 1;
	int descend;
	struct proc *p;
	struct pgrp *pg;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	descend = ops & KTRFLAG_DESCEND;
	facs = facs & ~((unsigned) KTRFAC_ROOT);

	/*
	 * Clear all uses of the tracefile
	 */
	if (KTROP(ops) == KTROP_CLEARFILE) {
		proclist_lock_read();
		for (p = LIST_FIRST(&allproc); p != NULL;
		     p = LIST_NEXT(p, p_list)) {
			if (ktrsamefile(p->p_tracep, fp)) {
				if (ktrcanset(curp, p))
					ktrderef(p);
				else
					error = EPERM;
			}
		}
		proclist_unlock_read();
		goto done;
	}

	/*
	 * Mark fp non-blocking, to avoid problems from possible deadlocks.
	 */

	if (fp != NULL) {
		fp->f_flag |= FNONBLOCK;
		(*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&one, curp);
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = LIST_FIRST(&pg->pg_members); p != NULL;
		     p = LIST_NEXT(p, p_pglist)) {
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, fp);
			else
				ret |= ktrops(curp, p, ops, facs, fp);
		}

	} else {
		/*
		 * by pid
		 */
		p = pfind(pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, fp);
		else
			ret |= ktrops(curp, p, ops, facs, fp);
	}
	if (!ret)
		error = EPERM;
done:
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * fktrace system call
 */
/* ARGSUSED */
int
sys_fktrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_fktrace_args /* {
		syscallarg(int) fd;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct proc *curp = l->l_proc;
	struct file *fp = NULL;
	struct filedesc *fdp = curp->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	return ktrace_common(curp, SCARG(uap, ops),
	    SCARG(uap, facs), SCARG(uap, pid), fp);
}

/*
 * ktrace system call
 */
/* ARGSUSED */
int
sys_ktrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_ktrace_args /* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct proc *curp = l->l_proc;
	struct vnode *vp = NULL;
	struct file *fp = NULL;
	int fd;
	int ops = SCARG(uap, ops);
	int error = 0;
	struct nameidata nd;

	ops = KTROP(ops) | (ops & KTRFLAG_DESCEND);

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
		    curp);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
		/*
		 * XXX This uses up a file descriptor slot in the
		 * tracing process for the duration of this syscall.
		 * This is not expected to be a problem.  If
		 * falloc(NULL, ...) DTRT we could skip that part, but
		 * that would require changing its interface to allow
		 * the caller to pass in a ucred..
		 *
		 * This will FILE_USE the fp it returns, if any.
		 * Keep it in use until we return.
		 */
		if ((error = falloc(curp, &fp, &fd)) != 0)
			goto done;

		fp->f_flag = FWRITE|FAPPEND;
		fp->f_type = DTYPE_VNODE;
		fp->f_ops = &vnops;
		fp->f_data = (caddr_t)vp;
		FILE_SET_MATURE(fp);
		vp = NULL;
	}
	error = ktrace_common(curp, SCARG(uap, ops), SCARG(uap, facs),
	    SCARG(uap, pid), fp);
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	if (fp != NULL) {
		FILE_UNUSE(fp, curp);	/* release file */
		fdrelease(curp, fd);	/* release fd table slot */
	}
	return (error);
}

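/*
 * Apply a single set/clear operation to one process, switching its
 * trace file if necessary.
 */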
int
ktrops(curp, p, ops, facs, fp)
	struct proc *curp;
	struct proc *p;
	int ops;
	int facs;
	struct file *fp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (KTROP(ops) == KTROP_SET) {
		if (p->p_tracep != fp) {
			/*
			 * if trace file already in use, relinquish
			 */
			ktrderef(p);
			p->p_tracep = fp;
			ktradref(p);
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrderef(p);
		}
	}

	/*
	 * Emit an emulation record every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p);
#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

	return (1);
}

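/*
 * Apply a trace operation to a process and all of its descendants.
 */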
int
ktrsetchildren(curp, top, ops, facs, fp)
	struct proc *curp;
	struct proc *top;
	int ops;
	int facs;
	struct file *fp;
{
	struct proc *p;
	int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, fp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL)
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

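/*
 * Write a single trace record (header plus optional payload) to the
 * process' trace file; on a persistent error, stop tracing to that
 * file for every process using it.
 */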
int
ktrwrite(p, kth)
	struct proc *p;
	struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	int error, tries;
	struct file *fp = p->p_tracep;

	if (fp == NULL)
		return 0;

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}

	simple_lock(&fp->f_slock);
	FILE_USE(fp);

	tries = 0;
	do {
		error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
		    fp->f_cred, FOF_UPDATE_OFFSET);
		tries++;
		if (error == EWOULDBLOCK)
			preempt(1);
	} while ((error == EWOULDBLOCK) && (tries < 3));
	FILE_UNUSE(fp, NULL);

	if (__predict_true(error == 0))
		return (0);
	/*
	 * If error encountered, give up tracing on this vnode.  Don't report
	 * EPIPE as this can easily happen with fktrace()/ktruss.
	 */
	if (error != EPIPE)
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n",
		    error);
	proclist_lock_read();
	for (p = LIST_FIRST(&allproc); p != NULL; p = LIST_NEXT(p, p_list)) {
		if (ktrsamefile(p->p_tracep, fp))
			ktrderef(p);
	}
	proclist_unlock_read();

	return (error);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(callp, targetp)
	struct proc *callp;
	struct proc *targetp;
{
	struct pcred *caller = callp->p_cred;
	struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	    target->p_ruid == target->p_svuid &&
	    caller->p_rgid == target->p_rgid &&	/* XXX */
	    target->p_rgid == target->p_svgid &&
	    (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
	    (targetp->p_flag & P_SUGID) == 0) ||
	    caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}
#endif	/* KTRACE */

/*
 * Put a user-defined entry into the ktrace records.
 */
int
sys_utrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#ifdef KTRACE
	struct sys_utrace_args /* {
		syscallarg(const char *) label;
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;

	if (!KTRPOINT(p, KTR_USER))
		return (0);

	if (SCARG(uap, len) > KTR_USER_MAXLEN)
		return (EINVAL);

	ktruser(p, SCARG(uap, label), SCARG(uap, addr), SCARG(uap, len), 1);

	return (0);
#else /* !KTRACE */
	return ENOSYS;
#endif /* KTRACE */
}