/*	$NetBSD: kern_ktrace.c,v 1.16 1995/03/09 08:55:47 mycroft Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#ifdef KTRACE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

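/*
 * Allocate a trace record header and fill in the fields common to all
 * record types: the type, a timestamp, and the pid and command name of
 * the current process.
 */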
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
	    M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

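/*
 * Write a KTR_SYSCALL record: the system call number and a copy of its
 * arguments.
 */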
void
ktrsyscall(vp, code, narg, argsize, args)
	struct vnode *vp;
	register_t code;
	int narg;
	size_t argsize;
	register_t args[];
{
	struct ktr_header *kth;
	struct ktr_syscall *ktp;
	register int len = sizeof(struct ktr_syscall) + argsize;
	struct proc *p = curproc;	/* XXX */
	register_t *argp;
	int i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof *argp); i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

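/*
 * Write a KTR_SYSRET record: the system call number, its error status
 * and its return value.
 */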
void
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	register_t code;
	int error;
	register_t retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

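/*
 * Write a KTR_NAMEI record containing the pathname being looked up.
 */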
void
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

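/*
 * Write a KTR_GENIO record: the descriptor, the transfer direction and
 * a copy of the user data, gathered from the supplied iovec array.
 */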
void
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
	    M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

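/*
 * Write a KTR_PSIG record describing a posted signal: its number, the
 * action taken, the signal mask and the code.
 */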
void
ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	int sig;
	sig_t action;
	int mask, code;
{
	struct ktr_header *kth;
	struct ktr_psig kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

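/*
 * Write a KTR_CSW record marking a context switch; `out' tells whether
 * the process is being switched out or resumed, `user' whether this
 * happened in user or kernel mode.
 */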
void
ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buf = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * ktrace system call
 */
/* ARGSUSED */
int
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args /* {
		syscallarg(char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap;
	register_t *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = SCARG(uap, facs) & ~KTRFAC_ROOT;
	int ops = KTROP(SCARG(uap, ops));
	int descend = SCARG(uap, ops) & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
		    curp);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					(void) vn_close(vp, FREAD|FWRITE,
					    p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (SCARG(uap, pid) < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-SCARG(uap, pid));
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(SCARG(uap, pid));
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

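/*
 * Set or clear the requested trace facilities on a single process,
 * taking or dropping a reference on the trace vnode as needed.
 * Returns 1 on success, 0 if the caller may not trace the target.
 */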
int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

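/*
 * Apply ktrops() to a process and all of its descendants.
 */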
int
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_children.lh_first)
			p = p->p_children.lh_first;
		else for (;;) {
			if (p == top)
				return (ret);
			if (p->p_sibling.le_next) {
				p = p->p_sibling.le_next;
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

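/*
 * Write a trace record (the header and any payload it points at) to
 * the trace vnode.  If the write fails, disable tracing to that vnode
 * for every process using it.
 */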
void
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif	/* KTRACE */