1 /* $NetBSD: kern_sysctl.c,v 1.86.2.2 2001/03/29 01:09:11 nathanw Exp $ */
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Mike Karels at Berkeley Software Design, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95
39 */
40
41 /*
42 * sysctl system call.
43 */
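/*
 * For illustration only: from userland this interface is normally
 * reached through the sysctl(3) wrapper in libc.  A minimal read of
 * kern.maxproc looks roughly like:
 *
 *	int mib[2] = { CTL_KERN, KERN_MAXPROC }, maxproc;
 *	size_t len = sizeof(maxproc);
 *
 *	if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */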
44
45 #include "opt_ddb.h"
46 #include "opt_insecure.h"
47 #include "opt_defcorename.h"
48 #include "opt_sysv.h"
49 #include "pty.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/buf.h>
55 #include <sys/device.h>
56 #include <sys/disklabel.h>
57 #include <sys/dkstat.h>
58 #include <sys/exec.h>
59 #include <sys/file.h>
60 #include <sys/ioctl.h>
61 #include <sys/malloc.h>
62 #include <sys/mount.h>
63 #include <sys/msgbuf.h>
64 #include <sys/pool.h>
65 #include <sys/lwp.h>
66 #include <sys/proc.h>
67 #include <sys/resource.h>
68 #include <sys/resourcevar.h>
69 #include <sys/syscallargs.h>
70 #include <sys/tty.h>
71 #include <sys/unistd.h>
72 #include <sys/vnode.h>
73 #define __SYSCTL_PRIVATE
74 #include <sys/sysctl.h>
75 #include <sys/lock.h>
76
77 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
78 #include <sys/ipc.h>
79 #endif
80 #ifdef SYSVMSG
81 #include <sys/msg.h>
82 #endif
83 #ifdef SYSVSEM
84 #include <sys/sem.h>
85 #endif
86 #ifdef SYSVSHM
87 #include <sys/shm.h>
88 #endif
89
90 #include <dev/cons.h>
91
92 #if defined(DDB)
93 #include <ddb/ddbvar.h>
94 #endif
95
96 #define PTRTOINT64(foo) ((u_int64_t)(uintptr_t)(foo))
97
98 static int sysctl_file __P((void *, size_t *));
99 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
100 static int sysctl_sysvipc __P((int *, u_int, void *, size_t *));
101 #endif
102 static int sysctl_msgbuf __P((void *, size_t *));
103 static int sysctl_doeproc __P((int *, u_int, void *, size_t *));
104 #ifdef MULTIPROCESSOR
105 static int sysctl_docptime __P((void *, size_t *, void *));
106 static int sysctl_ncpus __P((void));
107 #endif
108 static void fill_kproc2 __P((struct proc *, struct kinfo_proc2 *));
109 static int sysctl_procargs __P((int *, u_int, void *, size_t *, struct proc *));
110 #if NPTY > 0
111 static int sysctl_pty __P((void *, size_t *, void *, size_t));
112 #endif
113
114 static struct lwp *proc_representative_lwp(struct proc *);
115
116 /*
117 * The `sysctl_memlock' is intended to keep too many processes from
118 * locking down memory by doing sysctls at once. Whether or not this
119 * is really worth worrying about is probably a subject of some
120 * debate.
121 */
122 struct lock sysctl_memlock;
123
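/*
 * Initialize the lock that serializes sysctl calls which wire down
 * user memory.
 */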
124 void
125 sysctl_init(void)
126 {
127
128 lockinit(&sysctl_memlock, PRIBIO|PCATCH, "sysctl", 0, 0);
129 }
130
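/*
 * The sysctl(2) system call itself: copy in the name vector, check
 * permission to set values, select the handler for the top-level
 * identifier, and wire the old-value buffer (serialized by
 * sysctl_memlock) while that handler runs.
 */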
131 int
132 sys___sysctl(l, v, retval)
133 struct lwp *l;
134 void *v;
135 register_t *retval;
136 {
137 struct sys___sysctl_args /* {
138 syscallarg(int *) name;
139 syscallarg(u_int) namelen;
140 syscallarg(void *) old;
141 syscallarg(size_t *) oldlenp;
142 syscallarg(void *) new;
143 syscallarg(size_t) newlen;
144 } */ *uap = v;
145 struct proc *p = l->l_proc;
146 int error;
147 size_t savelen = 0, oldlen = 0;
148 sysctlfn *fn;
149 int name[CTL_MAXNAME];
150 size_t *oldlenp;
151
152 /*
153 * all top-level sysctl names are non-terminal
154 */
155 if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
156 return (EINVAL);
157 error = copyin(SCARG(uap, name), &name,
158 SCARG(uap, namelen) * sizeof(int));
159 if (error)
160 return (error);
161
162 /*
163 * For all but CTL_PROC, must be root to change a value.
164 * For CTL_PROC, must be root, or owner of the proc (and not suid);
165 * this is checked in proc_sysctl() (once we know the target proc).
166 */
167 if (SCARG(uap, new) != NULL && name[0] != CTL_PROC &&
168 (error = suser(p->p_ucred, &p->p_acflag)))
169 return error;
170
171 switch (name[0]) {
172 case CTL_KERN:
173 fn = kern_sysctl;
174 break;
175 case CTL_HW:
176 fn = hw_sysctl;
177 break;
178 case CTL_VM:
179 fn = uvm_sysctl;
180 break;
181 case CTL_NET:
182 fn = net_sysctl;
183 break;
184 case CTL_VFS:
185 fn = vfs_sysctl;
186 break;
187 case CTL_MACHDEP:
188 fn = cpu_sysctl;
189 break;
190 #ifdef DEBUG
191 case CTL_DEBUG:
192 fn = debug_sysctl;
193 break;
194 #endif
195 #ifdef DDB
196 case CTL_DDB:
197 fn = ddb_sysctl;
198 break;
199 #endif
200 case CTL_PROC:
201 fn = proc_sysctl;
202 break;
203 default:
204 return (EOPNOTSUPP);
205 }
206
207 /*
208 * XXX Hey, we wire `old', but what about `new'?
209 */
210
211 oldlenp = SCARG(uap, oldlenp);
212 if (oldlenp) {
213 if ((error = copyin(oldlenp, &oldlen, sizeof(oldlen))))
214 return (error);
215 oldlenp = &oldlen;
216 }
217 if (SCARG(uap, old) != NULL) {
218 error = lockmgr(&sysctl_memlock, LK_EXCLUSIVE, NULL);
219 if (error)
220 return (error);
221 if (uvm_vslock(p, SCARG(uap, old), oldlen,
222 VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) {
223 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
224 return (EFAULT);
225 }
226 savelen = oldlen;
227 }
228 error = (*fn)(name + 1, SCARG(uap, namelen) - 1, SCARG(uap, old),
229 oldlenp, SCARG(uap, new), SCARG(uap, newlen), p);
230 if (SCARG(uap, old) != NULL) {
231 uvm_vsunlock(p, SCARG(uap, old), savelen);
232 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
233 }
234 if (error)
235 return (error);
236 if (SCARG(uap, oldlenp))
237 error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
238 return (error);
239 }
240
241 /*
242 * Attributes stored in the kernel.
243 */
244 char hostname[MAXHOSTNAMELEN];
245 int hostnamelen;
246
247 char domainname[MAXHOSTNAMELEN];
248 int domainnamelen;
249
250 long hostid;
251
252 #ifdef INSECURE
253 int securelevel = -1;
254 #else
255 int securelevel = 0;
256 #endif
257
258 #ifndef DEFCORENAME
259 #define DEFCORENAME "%n.core"
260 #endif
261 char defcorename[MAXPATHLEN] = DEFCORENAME;
262 int defcorenamelen = sizeof(DEFCORENAME);
263
264 extern int kern_logsigexit;
265 extern fixpt_t ccpu;
266
267 #ifndef MULTIPROCESSOR
268 #define sysctl_ncpus() 1
269 #endif
270
271 #ifdef MULTIPROCESSOR
272
273 #ifndef CPU_INFO_FOREACH
274 #define CPU_INFO_ITERATOR int
275 #define CPU_INFO_FOREACH(cii, ci) cii = 0, ci = curcpu(); ci != NULL; ci = NULL
276 #endif
277
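/*
 * Sum the per-CPU cp_time accumulators into a single array for
 * KERN_CP_TIME on multiprocessor kernels.
 */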
278 static int
279 sysctl_docptime(oldp, oldlenp, newp)
280 void *oldp;
281 size_t *oldlenp;
282 void *newp;
283 {
284 u_int64_t cp_time[CPUSTATES];
285 int i;
286 struct cpu_info *ci;
287 CPU_INFO_ITERATOR cii;
288
289 for (i=0; i<CPUSTATES; i++)
290 cp_time[i] = 0;
291
292 for (CPU_INFO_FOREACH(cii, ci)) {
293 for (i=0; i<CPUSTATES; i++)
294 cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
295 }
296 return (sysctl_rdstruct(oldp, oldlenp, newp,
297 cp_time, sizeof(cp_time)));
298 }
299
300 static int
301 sysctl_ncpus(void)
302 {
303 struct cpu_info *ci;
304 CPU_INFO_ITERATOR cii;
305
306 int ncpus = 0;
307 for (CPU_INFO_FOREACH(cii, ci))
308 ncpus++;
309 return ncpus;
310 }
311
312 #endif
313
314 /*
315 * kernel related system variables.
316 */
317 int
318 kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
319 int *name;
320 u_int namelen;
321 void *oldp;
322 size_t *oldlenp;
323 void *newp;
324 size_t newlen;
325 struct proc *p;
326 {
327 int error, level, inthostid;
328 int old_autonicetime;
329 int old_vnodes;
330 dev_t consdev;
331
332 /* All sysctl names at this level, except for a few, are terminal. */
333 switch (name[0]) {
334 case KERN_PROC:
335 case KERN_PROC2:
336 case KERN_PROF:
337 case KERN_MBUF:
338 case KERN_PROC_ARGS:
339 case KERN_SYSVIPC_INFO:
340 /* Not terminal. */
341 break;
342 default:
343 if (namelen != 1)
344 return (ENOTDIR); /* overloaded */
345 }
346
347 switch (name[0]) {
348 case KERN_OSTYPE:
349 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
350 case KERN_OSRELEASE:
351 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
352 case KERN_OSREV:
353 return (sysctl_rdint(oldp, oldlenp, newp, __NetBSD_Version__));
354 case KERN_VERSION:
355 return (sysctl_rdstring(oldp, oldlenp, newp, version));
356 case KERN_MAXVNODES:
357 old_vnodes = desiredvnodes;
358 error = sysctl_int(oldp, oldlenp, newp, newlen, &desiredvnodes);
359 if (old_vnodes > desiredvnodes) {
360 desiredvnodes = old_vnodes;
361 return (EINVAL);
362 }
363 return (error);
364 case KERN_MAXPROC:
365 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc));
366 case KERN_MAXFILES:
367 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
368 case KERN_ARGMAX:
369 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
370 case KERN_SECURELVL:
371 level = securelevel;
372 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
373 newp == NULL)
374 return (error);
375 if (level < securelevel && p->p_pid != 1)
376 return (EPERM);
377 securelevel = level;
378 return (0);
379 case KERN_HOSTNAME:
380 error = sysctl_string(oldp, oldlenp, newp, newlen,
381 hostname, sizeof(hostname));
382 if (newp && !error)
383 hostnamelen = newlen;
384 return (error);
385 case KERN_DOMAINNAME:
386 error = sysctl_string(oldp, oldlenp, newp, newlen,
387 domainname, sizeof(domainname));
388 if (newp && !error)
389 domainnamelen = newlen;
390 return (error);
391 case KERN_HOSTID:
392 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
393 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
394 hostid = inthostid;
395 return (error);
396 case KERN_CLOCKRATE:
397 return (sysctl_clockrate(oldp, oldlenp));
398 case KERN_BOOTTIME:
399 return (sysctl_rdstruct(oldp, oldlenp, newp, &boottime,
400 sizeof(struct timeval)));
401 case KERN_VNODE:
402 return (sysctl_vnode(oldp, oldlenp, p));
403 case KERN_PROC:
404 case KERN_PROC2:
405 return (sysctl_doeproc(name, namelen, oldp, oldlenp));
406 case KERN_PROC_ARGS:
407 return (sysctl_procargs(name + 1, namelen - 1,
408 oldp, oldlenp, p));
409 case KERN_FILE:
410 return (sysctl_file(oldp, oldlenp));
411 #ifdef GPROF
412 case KERN_PROF:
413 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
414 newp, newlen));
415 #endif
416 case KERN_POSIX1:
417 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
418 case KERN_NGROUPS:
419 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
420 case KERN_JOB_CONTROL:
421 return (sysctl_rdint(oldp, oldlenp, newp, 1));
422 case KERN_SAVED_IDS:
423 #ifdef _POSIX_SAVED_IDS
424 return (sysctl_rdint(oldp, oldlenp, newp, 1));
425 #else
426 return (sysctl_rdint(oldp, oldlenp, newp, 0));
427 #endif
428 case KERN_MAXPARTITIONS:
429 return (sysctl_rdint(oldp, oldlenp, newp, MAXPARTITIONS));
430 case KERN_RAWPARTITION:
431 return (sysctl_rdint(oldp, oldlenp, newp, RAW_PART));
432 #ifdef NTP
433 case KERN_NTPTIME:
434 return (sysctl_ntptime(oldp, oldlenp));
435 #endif
436 case KERN_AUTONICETIME:
437 old_autonicetime = autonicetime;
438 error = sysctl_int(oldp, oldlenp, newp, newlen, &autonicetime);
439 if (autonicetime < 0)
440 autonicetime = old_autonicetime;
441 return (error);
442 case KERN_AUTONICEVAL:
443 error = sysctl_int(oldp, oldlenp, newp, newlen, &autoniceval);
444 if (autoniceval < PRIO_MIN)
445 autoniceval = PRIO_MIN;
446 if (autoniceval > PRIO_MAX)
447 autoniceval = PRIO_MAX;
448 return (error);
449 case KERN_RTC_OFFSET:
450 return (sysctl_rdint(oldp, oldlenp, newp, rtc_offset));
451 case KERN_ROOT_DEVICE:
452 return (sysctl_rdstring(oldp, oldlenp, newp,
453 root_device->dv_xname));
454 case KERN_MSGBUFSIZE:
455 /*
456 * deal with cases where the message buffer has
457 * become corrupted.
458 */
459 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
460 msgbufenabled = 0;
461 return (ENXIO);
462 }
463 return (sysctl_rdint(oldp, oldlenp, newp, msgbufp->msg_bufs));
464 case KERN_FSYNC:
465 return (sysctl_rdint(oldp, oldlenp, newp, 1));
466 case KERN_SYSVMSG:
467 #ifdef SYSVMSG
468 return (sysctl_rdint(oldp, oldlenp, newp, 1));
469 #else
470 return (sysctl_rdint(oldp, oldlenp, newp, 0));
471 #endif
472 case KERN_SYSVSEM:
473 #ifdef SYSVSEM
474 return (sysctl_rdint(oldp, oldlenp, newp, 1));
475 #else
476 return (sysctl_rdint(oldp, oldlenp, newp, 0));
477 #endif
478 case KERN_SYSVSHM:
479 #ifdef SYSVSHM
480 return (sysctl_rdint(oldp, oldlenp, newp, 1));
481 #else
482 return (sysctl_rdint(oldp, oldlenp, newp, 0));
483 #endif
484 case KERN_DEFCORENAME:
485 if (newp && newlen < 1)
486 return (EINVAL);
487 error = sysctl_string(oldp, oldlenp, newp, newlen,
488 defcorename, sizeof(defcorename));
489 if (newp && !error)
490 defcorenamelen = newlen;
491 return (error);
492 case KERN_SYNCHRONIZED_IO:
493 return (sysctl_rdint(oldp, oldlenp, newp, 1));
494 case KERN_IOV_MAX:
495 return (sysctl_rdint(oldp, oldlenp, newp, IOV_MAX));
496 case KERN_MBUF:
497 return (sysctl_dombuf(name + 1, namelen - 1, oldp, oldlenp,
498 newp, newlen));
499 case KERN_MAPPED_FILES:
500 return (sysctl_rdint(oldp, oldlenp, newp, 1));
501 case KERN_MEMLOCK:
502 return (sysctl_rdint(oldp, oldlenp, newp, 1));
503 case KERN_MEMLOCK_RANGE:
504 return (sysctl_rdint(oldp, oldlenp, newp, 1));
505 case KERN_MEMORY_PROTECTION:
506 return (sysctl_rdint(oldp, oldlenp, newp, 1));
507 case KERN_LOGIN_NAME_MAX:
508 return (sysctl_rdint(oldp, oldlenp, newp, LOGIN_NAME_MAX));
509 case KERN_LOGSIGEXIT:
510 return (sysctl_int(oldp, oldlenp, newp, newlen,
511 &kern_logsigexit));
512 case KERN_FSCALE:
513 return (sysctl_rdint(oldp, oldlenp, newp, FSCALE));
514 case KERN_CCPU:
515 return (sysctl_rdint(oldp, oldlenp, newp, ccpu));
516 case KERN_CP_TIME:
517 #ifndef MULTIPROCESSOR
518 return (sysctl_rdstruct(oldp, oldlenp, newp,
519 curcpu()->ci_schedstate.spc_cp_time,
520 sizeof(curcpu()->ci_schedstate.spc_cp_time)));
521 #else
522 return (sysctl_docptime(oldp, oldlenp, newp));
523 #endif
524 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
525 case KERN_SYSVIPC_INFO:
526 return (sysctl_sysvipc(name + 1, namelen - 1, oldp, oldlenp));
527 #endif
528 case KERN_MSGBUF:
529 return (sysctl_msgbuf(oldp, oldlenp));
530 case KERN_CONSDEV:
531 if (cn_tab != NULL)
532 consdev = cn_tab->cn_dev;
533 else
534 consdev = NODEV;
535 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
536 sizeof consdev));
537 #if NPTY > 0
538 case KERN_MAXPTYS:
539 return sysctl_pty(oldp, oldlenp, newp, newlen);
540 #endif
541 default:
542 return (EOPNOTSUPP);
543 }
544 /* NOTREACHED */
545 }
546
547 /*
548 * hardware related system variables.
549 */
550 int
551 hw_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
552 int *name;
553 u_int namelen;
554 void *oldp;
555 size_t *oldlenp;
556 void *newp;
557 size_t newlen;
558 struct proc *p;
559 {
560
561 /* all sysctl names at this level are terminal */
562 if (namelen != 1)
563 return (ENOTDIR); /* overloaded */
564
565 switch (name[0]) {
566 case HW_MACHINE:
567 return (sysctl_rdstring(oldp, oldlenp, newp, machine));
568 case HW_MACHINE_ARCH:
569 return (sysctl_rdstring(oldp, oldlenp, newp, machine_arch));
570 case HW_MODEL:
571 return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
572 case HW_NCPU:
573 return (sysctl_rdint(oldp, oldlenp, newp, sysctl_ncpus()));
574 case HW_BYTEORDER:
575 return (sysctl_rdint(oldp, oldlenp, newp, BYTE_ORDER));
576 case HW_PHYSMEM:
577 return (sysctl_rdint(oldp, oldlenp, newp, ctob(physmem)));
578 case HW_USERMEM:
579 return (sysctl_rdint(oldp, oldlenp, newp,
580 ctob(physmem - uvmexp.wired)));
581 case HW_PAGESIZE:
582 return (sysctl_rdint(oldp, oldlenp, newp, PAGE_SIZE));
583 case HW_ALIGNBYTES:
584 return (sysctl_rdint(oldp, oldlenp, newp, ALIGNBYTES));
585 case HW_CNMAGIC: {
586 char magic[CNS_LEN];
587 int error;
588
589 if (oldp)
590 cn_get_magic(magic, CNS_LEN);
591 error = sysctl_string(oldp, oldlenp, newp, newlen,
592 magic, sizeof(magic));
593 if (newp && !error) {
594 error = cn_set_magic(magic);
595 }
596 return (error);
597 }
598 default:
599 return (EOPNOTSUPP);
600 }
601 /* NOTREACHED */
602 }
603
604 #ifdef DEBUG
605 /*
606 * Debugging related system variables.
607 */
608 struct ctldebug debug0, debug1, debug2, debug3, debug4;
609 struct ctldebug debug5, debug6, debug7, debug8, debug9;
610 struct ctldebug debug10, debug11, debug12, debug13, debug14;
611 struct ctldebug debug15, debug16, debug17, debug18, debug19;
612 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
613 &debug0, &debug1, &debug2, &debug3, &debug4,
614 &debug5, &debug6, &debug7, &debug8, &debug9,
615 &debug10, &debug11, &debug12, &debug13, &debug14,
616 &debug15, &debug16, &debug17, &debug18, &debug19,
617 };
618 int
619 debug_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
620 int *name;
621 u_int namelen;
622 void *oldp;
623 size_t *oldlenp;
624 void *newp;
625 size_t newlen;
626 struct proc *p;
627 {
628 struct ctldebug *cdp;
629
630 /* all sysctl names at this level are name and field */
631 if (namelen != 2)
632 return (ENOTDIR); /* overloaded */
633 if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID ||
634     (cdp = debugvars[name[0]])->debugname == 0)
635 return (EOPNOTSUPP);
636 switch (name[1]) {
637 case CTL_DEBUG_NAME:
638 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
639 case CTL_DEBUG_VALUE:
640 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
641 default:
642 return (EOPNOTSUPP);
643 }
644 /* NOTREACHED */
645 }
646 #endif /* DEBUG */
647
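/*
 * Per-process variables (CTL_PROC).  Locate the target process, verify
 * that the caller is allowed to inspect or modify it, and handle the
 * per-process core name and resource limit nodes.
 */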
648 int
649 proc_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
650 int *name;
651 u_int namelen;
652 void *oldp;
653 size_t *oldlenp;
654 void *newp;
655 size_t newlen;
656 struct proc *p;
657 {
658 struct proc *ptmp = NULL;
659 const struct proclist_desc *pd;
660 int error = 0;
661 struct rlimit alim;
662 struct plimit *newplim;
663 char *tmps = NULL;
664 int i, curlen, len;
665
666 if (namelen < 2)
667 return EINVAL;
668
669 if (name[0] == PROC_CURPROC) {
670 ptmp = p;
671 } else {
672 proclist_lock_read();
673 for (pd = proclists; pd->pd_list != NULL; pd++) {
674 for (ptmp = LIST_FIRST(pd->pd_list); ptmp != NULL;
675 ptmp = LIST_NEXT(ptmp, p_list)) {
676 /* Skip embryonic processes. */
677 if (ptmp->p_stat == SIDL)
678 continue;
679 if (ptmp->p_pid == (pid_t)name[0])
680 break;
681 }
682 if (ptmp != NULL)
683 break;
684 }
685 proclist_unlock_read();
686 if (ptmp == NULL)
687 return(ESRCH);
688 if (p->p_ucred->cr_uid != 0) {
689 if(p->p_cred->p_ruid != ptmp->p_cred->p_ruid ||
690 p->p_cred->p_ruid != ptmp->p_cred->p_svuid)
691 return EPERM;
692 if (ptmp->p_cred->p_rgid != ptmp->p_cred->p_svgid)
693 return EPERM; /* sgid proc */
694 for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
695 if (p->p_ucred->cr_groups[i] ==
696 ptmp->p_cred->p_rgid)
697 break;
698 }
699 if (i == p->p_ucred->cr_ngroups)
700 return EPERM;
701 }
702 }
703 if (name[1] == PROC_PID_CORENAME) {
704 if (namelen != 2)
705 return EINVAL;
706 /*
707 * Can't use sysctl_string() here because we may malloc a new
708 * area during the process, so we have to do it by hand.
709 */
710 curlen = strlen(ptmp->p_limit->pl_corename) + 1;
711 if (oldlenp && *oldlenp < curlen) {
712 if (!oldp)
713 *oldlenp = curlen;
714 return (ENOMEM);
715 }
716 if (newp) {
717 if (securelevel > 2)
718 return EPERM;
719 if (newlen > MAXPATHLEN)
720 return ENAMETOOLONG;
721 tmps = malloc(newlen + 1, M_TEMP, M_WAITOK);
722 if (tmps == NULL)
723 return ENOMEM;
724 error = copyin(newp, tmps, newlen + 1);
725 tmps[newlen] = '\0';
726 if (error)
727 goto cleanup;
728 /* Enforce the name to be either 'core' or to end with '.core' */
729 if (newlen < 4) { /* c.o.r.e */
730 error = EINVAL;
731 goto cleanup;
732 }
733 len = newlen - 4;
734 if (len > 0) {
735 if (tmps[len - 1] != '.' &&
736 tmps[len - 1] != '/') {
737 error = EINVAL;
738 goto cleanup;
739 }
740 }
741 if (strcmp(&tmps[len], "core") != 0) {
742 error = EINVAL;
743 goto cleanup;
744 }
745 }
746 if (oldp && oldlenp) {
747 *oldlenp = curlen;
748 error = copyout(ptmp->p_limit->pl_corename, oldp,
749 curlen);
750 }
751 if (newp && error == 0) {
752 /* if the 2 strings are identical, don't limcopy() */
753 if (strcmp(tmps, ptmp->p_limit->pl_corename) == 0) {
754 error = 0;
755 goto cleanup;
756 }
757 if (ptmp->p_limit->p_refcnt > 1 &&
758 (ptmp->p_limit->p_lflags & PL_SHAREMOD) == 0) {
759 newplim = limcopy(ptmp->p_limit);
760 limfree(ptmp->p_limit);
761 ptmp->p_limit = newplim;
762 } else if (ptmp->p_limit->pl_corename != defcorename) {
763 free(ptmp->p_limit->pl_corename, M_TEMP);
764 }
765 ptmp->p_limit->pl_corename = tmps;
766 return (0);
767 }
768 cleanup:
769 if (tmps)
770 free(tmps, M_TEMP);
771 return (error);
772 }
773 if (name[1] == PROC_PID_LIMIT) {
774 if (namelen != 4 || name[2] < 1 || name[2] >= PROC_PID_LIMIT_MAXID)
775 return EINVAL;
776 memcpy(&alim, &ptmp->p_rlimit[name[2] - 1], sizeof(alim));
777 if (name[3] == PROC_PID_LIMIT_TYPE_HARD)
778 error = sysctl_quad(oldp, oldlenp, newp, newlen,
779 &alim.rlim_max);
780 else if (name[3] == PROC_PID_LIMIT_TYPE_SOFT)
781 error = sysctl_quad(oldp, oldlenp, newp, newlen,
782 &alim.rlim_cur);
783 else
784 error = EINVAL;
785
786 if (error)
787 return error;
788
789 if (newp)
790 error = dosetrlimit(ptmp, p->p_cred,
791 name[2] - 1, &alim);
792 return error;
793 }
794 return (EINVAL);
795 }
796
797 /*
798 * Convenience macros.
799 */
800
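/*
 * These macros implement the common get/set protocol used by the
 * helpers below: with a NULL oldp the required length is returned in
 * *oldlenp; with a non-NULL oldp a too-small *oldlenp yields ENOMEM
 * (the string variant copies out a truncated result first); a
 * non-NULL newp, of exactly the expected length for scalars, is
 * copied in after the old value has been copied out.
 */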
801 #define SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, len) \
802 if (oldlenp) { \
803 if (!oldp) \
804 *oldlenp = len; \
805 else { \
806 if (*oldlenp < len) \
807 return(ENOMEM); \
808 *oldlenp = len; \
809 error = copyout((caddr_t)valp, oldp, len); \
810 } \
811 }
812
813 #define SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, typ) \
814 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, sizeof(typ))
815
816 #define SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len) \
817 if (newp && newlen != len) \
818 return (EINVAL);
819
820 #define SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, typ) \
821 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, sizeof(typ))
822
823 #define SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, len) \
824 if (error == 0 && newp) \
825 error = copyin(newp, valp, len);
826
827 #define SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, typ) \
828 SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, sizeof(typ))
829
830 #define SYSCTL_STRING_CORE(oldp, oldlenp, str) \
831 if (oldlenp) { \
832 len = strlen(str) + 1; \
833 if (!oldp) \
834 *oldlenp = len; \
835 else { \
836 if (*oldlenp < len) { \
837 err2 = ENOMEM; \
838 len = *oldlenp; \
839 } else \
840 *oldlenp = len; \
841 error = copyout(str, oldp, len);\
842 if (error == 0) \
843 error = err2; \
844 } \
845 }
846
847 /*
848 * Validate parameters and get old / set new parameters
849 * for an integer-valued sysctl function.
850 */
851 int
852 sysctl_int(oldp, oldlenp, newp, newlen, valp)
853 void *oldp;
854 size_t *oldlenp;
855 void *newp;
856 size_t newlen;
857 int *valp;
858 {
859 int error = 0;
860
861 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
862 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, int)
863 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, int)
864
865 return (error);
866 }
867
868
869 /*
870 * As above, but read-only.
871 */
872 int
873 sysctl_rdint(oldp, oldlenp, newp, val)
874 void *oldp;
875 size_t *oldlenp;
876 void *newp;
877 int val;
878 {
879 int error = 0;
880
881 if (newp)
882 return (EPERM);
883
884 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, int)
885
886 return (error);
887 }
888
889 /*
890 * Validate parameters and get old / set new parameters
891 * for a quad-valued sysctl function.
892 */
893 int
894 sysctl_quad(oldp, oldlenp, newp, newlen, valp)
895 void *oldp;
896 size_t *oldlenp;
897 void *newp;
898 size_t newlen;
899 quad_t *valp;
900 {
901 int error = 0;
902
903 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, quad_t)
904 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, quad_t)
905 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, quad_t)
906
907 return (error);
908 }
909
910 /*
911 * As above, but read-only.
912 */
913 int
914 sysctl_rdquad(oldp, oldlenp, newp, val)
915 void *oldp;
916 size_t *oldlenp;
917 void *newp;
918 quad_t val;
919 {
920 int error = 0;
921
922 if (newp)
923 return (EPERM);
924
925 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, quad_t)
926
927 return (error);
928 }
929
930 /*
931 * Validate parameters and get old / set new parameters
932 * for a string-valued sysctl function.
933 */
934 int
935 sysctl_string(oldp, oldlenp, newp, newlen, str, maxlen)
936 void *oldp;
937 size_t *oldlenp;
938 void *newp;
939 size_t newlen;
940 char *str;
941 int maxlen;
942 {
943 int len, error = 0, err2 = 0;
944
945 if (newp && newlen >= maxlen)
946 return (EINVAL);
947
948 SYSCTL_STRING_CORE(oldp, oldlenp, str);
949
950 if (error == 0 && newp) {
951 error = copyin(newp, str, newlen);
952 str[newlen] = 0;
953 }
954 return (error);
955 }
956
957 /*
958 * As above, but read-only.
959 */
960 int
961 sysctl_rdstring(oldp, oldlenp, newp, str)
962 void *oldp;
963 size_t *oldlenp;
964 void *newp;
965 const char *str;
966 {
967 int len, error = 0, err2 = 0;
968
969 if (newp)
970 return (EPERM);
971
972 SYSCTL_STRING_CORE(oldp, oldlenp, str);
973
974 return (error);
975 }
976
977 /*
978 * Validate parameters and get old / set new parameters
979 * for a structure oriented sysctl function.
980 */
981 int
982 sysctl_struct(oldp, oldlenp, newp, newlen, sp, len)
983 void *oldp;
984 size_t *oldlenp;
985 void *newp;
986 size_t newlen;
987 void *sp;
988 int len;
989 {
990 int error = 0;
991
992 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len)
993 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
994 SYSCTL_SCALAR_NEWPCOP_LEN(newp, sp, len)
995
996 return (error);
997 }
998
999 /*
1000 * Validate parameters and get old parameters
1001 * for a structure oriented sysctl function.
1002 */
1003 int
1004 sysctl_rdstruct(oldp, oldlenp, newp, sp, len)
1005 void *oldp;
1006 size_t *oldlenp;
1007 void *newp;
1008 const void *sp;
1009 int len;
1010 {
1011 int error = 0;
1012
1013 if (newp)
1014 return (EPERM);
1015
1016 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1017
1018 return (error);
1019 }
1020
1021 /*
1022 * As above, but can return a truncated result.
1023 */
1024 int
1025 sysctl_rdminstruct(oldp, oldlenp, newp, sp, len)
1026 void *oldp;
1027 size_t *oldlenp;
1028 void *newp;
1029 const void *sp;
1030 int len;
1031 {
1032 int error = 0;
1033
1034 if (newp)
1035 return (EPERM);
1036
1037 len = min(*oldlenp, len);
1038 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1039
1040 return (error);
1041 }
1042
1043 /*
1044 * Get file structures.
1045 */
1046 static int
1047 sysctl_file(vwhere, sizep)
1048 void *vwhere;
1049 size_t *sizep;
1050 {
1051 int buflen, error;
1052 struct file *fp;
1053 char *start, *where;
1054
1055 start = where = vwhere;
1056 buflen = *sizep;
1057 if (where == NULL) {
1058 /*
1059 * overestimate by 10 files
1060 */
1061 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
1062 return (0);
1063 }
1064
1065 /*
1066 * first copyout filehead
1067 */
1068 if (buflen < sizeof(filehead)) {
1069 *sizep = 0;
1070 return (0);
1071 }
1072 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1073 if (error)
1074 return (error);
1075 buflen -= sizeof(filehead);
1076 where += sizeof(filehead);
1077
1078 /*
1079 * followed by an array of file structures
1080 */
1081 for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) {
1082 if (buflen < sizeof(struct file)) {
1083 *sizep = where - start;
1084 return (ENOMEM);
1085 }
1086 error = copyout((caddr_t)fp, where, sizeof(struct file));
1087 if (error)
1088 return (error);
1089 buflen -= sizeof(struct file);
1090 where += sizeof(struct file);
1091 }
1092 *sizep = where - start;
1093 return (0);
1094 }
1095
1096 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
1097 #define FILL_PERM(src, dst) do { \
1098 (dst)._key = (src)._key; \
1099 (dst).uid = (src).uid; \
1100 (dst).gid = (src).gid; \
1101 (dst).cuid = (src).cuid; \
1102 (dst).cgid = (src).cgid; \
1103 (dst).mode = (src).mode; \
1104 (dst)._seq = (src)._seq; \
1105 } while (0)
1106 #define FILL_MSG(src, dst) do { \
1107 FILL_PERM((src).msg_perm, (dst).msg_perm); \
1108 (dst).msg_qnum = (src).msg_qnum; \
1109 (dst).msg_qbytes = (src).msg_qbytes; \
1110 (dst)._msg_cbytes = (src)._msg_cbytes; \
1111 (dst).msg_lspid = (src).msg_lspid; \
1112 (dst).msg_lrpid = (src).msg_lrpid; \
1113 (dst).msg_stime = (src).msg_stime; \
1114 (dst).msg_rtime = (src).msg_rtime; \
1115 (dst).msg_ctime = (src).msg_ctime; \
1116 } while (0)
1117 #define FILL_SEM(src, dst) do { \
1118 FILL_PERM((src).sem_perm, (dst).sem_perm); \
1119 (dst).sem_nsems = (src).sem_nsems; \
1120 (dst).sem_otime = (src).sem_otime; \
1121 (dst).sem_ctime = (src).sem_ctime; \
1122 } while (0)
1123 #define FILL_SHM(src, dst) do { \
1124 FILL_PERM((src).shm_perm, (dst).shm_perm); \
1125 (dst).shm_segsz = (src).shm_segsz; \
1126 (dst).shm_lpid = (src).shm_lpid; \
1127 (dst).shm_cpid = (src).shm_cpid; \
1128 (dst).shm_atime = (src).shm_atime; \
1129 (dst).shm_dtime = (src).shm_dtime; \
1130 (dst).shm_ctime = (src).shm_ctime; \
1131 (dst).shm_nattch = (src).shm_nattch; \
1132 } while (0)
1133
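/*
 * Return System V IPC status for KERN_SYSVIPC_INFO: an info structure
 * followed by an array of per-identifier entries, built in a temporary
 * kernel buffer and copied out in one piece.
 */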
1134 static int
1135 sysctl_sysvipc(name, namelen, where, sizep)
1136 int *name;
1137 u_int namelen;
1138 void *where;
1139 size_t *sizep;
1140 {
1141 #ifdef SYSVMSG
1142 struct msg_sysctl_info *msgsi;
1143 #endif
1144 #ifdef SYSVSEM
1145 struct sem_sysctl_info *semsi;
1146 #endif
1147 #ifdef SYSVSHM
1148 struct shm_sysctl_info *shmsi;
1149 #endif
1150 size_t infosize, dssize, tsize, buflen;
1151 void *buf = NULL, *buf2;
1152 char *start;
1153 int32_t nds;
1154 int i, error, ret;
1155
1156 if (namelen != 1)
1157 return (EINVAL);
1158
1159 start = where;
1160 buflen = *sizep;
1161
1162 switch (*name) {
1163 case KERN_SYSVIPC_MSG_INFO:
1164 #ifdef SYSVMSG
1165 infosize = sizeof(msgsi->msginfo);
1166 nds = msginfo.msgmni;
1167 dssize = sizeof(msgsi->msgids[0]);
1168 break;
1169 #else
1170 return (EINVAL);
1171 #endif
1172 case KERN_SYSVIPC_SEM_INFO:
1173 #ifdef SYSVSEM
1174 infosize = sizeof(semsi->seminfo);
1175 nds = seminfo.semmni;
1176 dssize = sizeof(semsi->semids[0]);
1177 break;
1178 #else
1179 return (EINVAL);
1180 #endif
1181 case KERN_SYSVIPC_SHM_INFO:
1182 #ifdef SYSVSHM
1183 infosize = sizeof(shmsi->shminfo);
1184 nds = shminfo.shmmni;
1185 dssize = sizeof(shmsi->shmids[0]);
1186 break;
1187 #else
1188 return (EINVAL);
1189 #endif
1190 default:
1191 return (EINVAL);
1192 }
1193 /*
1194 * Round infosize to 64 bit boundary if requesting more than just
1195 * the info structure or getting the total data size.
1196 */
1197 if (where == NULL || *sizep > infosize)
1198 infosize = ((infosize + 7) / 8) * 8;
1199 tsize = infosize + nds * dssize;
1200
1201 /* Return just the total size required. */
1202 if (where == NULL) {
1203 *sizep = tsize;
1204 return (0);
1205 }
1206
1207 /* Not enough room for even the info struct. */
1208 if (buflen < infosize) {
1209 *sizep = 0;
1210 return (ENOMEM);
1211 }
1212 buf = malloc(min(tsize, buflen), M_TEMP, M_WAITOK);
1213 memset(buf, 0, min(tsize, buflen));
1214
1215 switch (*name) {
1216 #ifdef SYSVMSG
1217 case KERN_SYSVIPC_MSG_INFO:
1218 msgsi = (struct msg_sysctl_info *)buf;
1219 buf2 = &msgsi->msgids[0];
1220 msgsi->msginfo = msginfo;
1221 break;
1222 #endif
1223 #ifdef SYSVSEM
1224 case KERN_SYSVIPC_SEM_INFO:
1225 semsi = (struct sem_sysctl_info *)buf;
1226 buf2 = &semsi->semids[0];
1227 semsi->seminfo = seminfo;
1228 break;
1229 #endif
1230 #ifdef SYSVSHM
1231 case KERN_SYSVIPC_SHM_INFO:
1232 shmsi = (struct shm_sysctl_info *)buf;
1233 buf2 = &shmsi->shmids[0];
1234 shmsi->shminfo = shminfo;
1235 break;
1236 #endif
1237 }
1238 buflen -= infosize;
1239
1240 ret = 0;
1241 if (buflen > 0) {
1242 /* Fill in the IPC data structures. */
1243 for (i = 0; i < nds; i++) {
1244 if (buflen < dssize) {
1245 ret = ENOMEM;
1246 break;
1247 }
1248 switch (*name) {
1249 #ifdef SYSVMSG
1250 case KERN_SYSVIPC_MSG_INFO:
1251 FILL_MSG(msqids[i], msgsi->msgids[i]);
1252 break;
1253 #endif
1254 #ifdef SYSVSEM
1255 case KERN_SYSVIPC_SEM_INFO:
1256 FILL_SEM(sema[i], semsi->semids[i]);
1257 break;
1258 #endif
1259 #ifdef SYSVSHM
1260 case KERN_SYSVIPC_SHM_INFO:
1261 FILL_SHM(shmsegs[i], shmsi->shmids[i]);
1262 break;
1263 #endif
1264 }
1265 buflen -= dssize;
1266 }
1267 }
1268 *sizep -= buflen;
1269 error = copyout(buf, start, *sizep);
1270 /* If copyout succeeded, use return code set earlier. */
1271 if (error == 0)
1272 error = ret;
1273 if (buf)
1274 free(buf, M_TEMP);
1275 return (error);
1276 }
1277 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */
1278
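/*
 * Copy out the kernel message buffer for KERN_MSGBUF, starting at the
 * current write index so the oldest data comes first, and wrapping
 * around the circular buffer.
 */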
1279 static int
1280 sysctl_msgbuf(vwhere, sizep)
1281 void *vwhere;
1282 size_t *sizep;
1283 {
1284 char *where = vwhere;
1285 size_t len, maxlen = *sizep;
1286 long pos;
1287 int error;
1288
1289 /*
1290 * deal with cases where the message buffer has
1291 * become corrupted.
1292 */
1293 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
1294 msgbufenabled = 0;
1295 return (ENXIO);
1296 }
1297
1298 if (where == NULL) {
1299 /* always return full buffer size */
1300 *sizep = msgbufp->msg_bufs;
1301 return (0);
1302 }
1303
1304 error = 0;
1305 maxlen = min(msgbufp->msg_bufs, maxlen);
1306 pos = msgbufp->msg_bufx;
1307 while (maxlen > 0) {
1308 len = pos == 0 ? msgbufp->msg_bufx : msgbufp->msg_bufs - msgbufp->msg_bufx;
1309 len = min(len, maxlen);
1310 if (len == 0)
1311 break;
1312 error = copyout(&msgbufp->msg_bufc[pos], where, len);
1313 if (error)
1314 break;
1315 where += len;
1316 maxlen -= len;
1317 pos = 0;
1318 }
1319 return (error);
1320 }
1321
1322 /*
1323 * try overestimating by 5 procs
1324 */
1325 #define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
1326
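/*
 * Common code for KERN_PROC and KERN_PROC2: walk the process lists,
 * apply the requested filter (pid, pgrp, session, tty, uid, gid, ...)
 * and copy out either kinfo_proc or fixed-size kinfo_proc2 records.
 */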
1327 static int
1328 sysctl_doeproc(name, namelen, vwhere, sizep)
1329 int *name;
1330 u_int namelen;
1331 void *vwhere;
1332 size_t *sizep;
1333 {
1334 struct eproc eproc;
1335 struct kinfo_proc2 kproc2;
1336 struct kinfo_proc *dp;
1337 struct proc *p;
1338 const struct proclist_desc *pd;
1339 char *where, *dp2;
1340 int type, op, arg, elem_size, elem_count;
1341 int buflen, needed, error;
1342
1343 dp = vwhere;
1344 dp2 = where = vwhere;
1345 buflen = where != NULL ? *sizep : 0;
1346 error = needed = 0;
1347 type = name[0];
1348
1349 if (type == KERN_PROC) {
1350 if (namelen != 3 && !(namelen == 2 && name[1] == KERN_PROC_ALL))
1351 return (EINVAL);
1352 op = name[1];
1353 if (op != KERN_PROC_ALL)
1354 arg = name[2];
1355 } else {
1356 if (namelen != 5)
1357 return (EINVAL);
1358 op = name[1];
1359 arg = name[2];
1360 elem_size = name[3];
1361 elem_count = name[4];
1362 }
1363
1364 proclist_lock_read();
1365
1366 pd = proclists;
1367 again:
1368 for (p = LIST_FIRST(pd->pd_list); p != NULL; p = LIST_NEXT(p, p_list)) {
1369 /*
1370 * Skip embryonic processes.
1371 */
1372 if (p->p_stat == SIDL)
1373 continue;
1374 /*
1375 * TODO - make more efficient (see notes below).
1376 * do by session.
1377 */
1378 switch (op) {
1379
1380 case KERN_PROC_PID:
1381 /* could do this with just a lookup */
1382 if (p->p_pid != (pid_t)arg)
1383 continue;
1384 break;
1385
1386 case KERN_PROC_PGRP:
1387 /* could do this by traversing pgrp */
1388 if (p->p_pgrp->pg_id != (pid_t)arg)
1389 continue;
1390 break;
1391
1392 case KERN_PROC_SESSION:
1393 if (p->p_session->s_sid != (pid_t)arg)
1394 continue;
1395 break;
1396
1397 case KERN_PROC_TTY:
1398 if (arg == KERN_PROC_TTY_REVOKE) {
1399 if ((p->p_flag & P_CONTROLT) == 0 ||
1400 p->p_session->s_ttyp == NULL ||
1401 p->p_session->s_ttyvp != NULL)
1402 continue;
1403 } else if ((p->p_flag & P_CONTROLT) == 0 ||
1404 p->p_session->s_ttyp == NULL) {
1405 if ((dev_t)arg != KERN_PROC_TTY_NODEV)
1406 continue;
1407 } else if (p->p_session->s_ttyp->t_dev != (dev_t)arg)
1408 continue;
1409 break;
1410
1411 case KERN_PROC_UID:
1412 if (p->p_ucred->cr_uid != (uid_t)arg)
1413 continue;
1414 break;
1415
1416 case KERN_PROC_RUID:
1417 if (p->p_cred->p_ruid != (uid_t)arg)
1418 continue;
1419 break;
1420
1421 case KERN_PROC_GID:
1422 if (p->p_ucred->cr_gid != (uid_t)arg)
1423 continue;
1424 break;
1425
1426 case KERN_PROC_RGID:
1427 if (p->p_cred->p_rgid != (uid_t)arg)
1428 continue;
1429 break;
1430
1431 case KERN_PROC_ALL:
1432 /* allow everything */
1433 break;
1434
1435 default:
1436 error = EINVAL;
1437 goto cleanup;
1438 }
1439 if (type == KERN_PROC) {
1440 if (buflen >= sizeof(struct kinfo_proc)) {
1441 fill_eproc(p, &eproc);
1442 error = copyout((caddr_t)p, &dp->kp_proc,
1443 sizeof(struct proc));
1444 if (error)
1445 goto cleanup;
1446 error = copyout((caddr_t)&eproc, &dp->kp_eproc,
1447 sizeof(eproc));
1448 if (error)
1449 goto cleanup;
1450 dp++;
1451 buflen -= sizeof(struct kinfo_proc);
1452 }
1453 needed += sizeof(struct kinfo_proc);
1454 } else { /* KERN_PROC2 */
1455 if (buflen >= elem_size && elem_count > 0) {
1456 fill_kproc2(p, &kproc2);
1457 /*
1458 * Copy out elem_size, but not larger than
1459 * the size of a struct kinfo_proc2.
1460 */
1461 error = copyout(&kproc2, dp2,
1462 min(sizeof(kproc2), elem_size));
1463 if (error)
1464 goto cleanup;
1465 dp2 += elem_size;
1466 buflen -= elem_size;
1467 elem_count--;
1468 }
1469 needed += elem_size;
1470 }
1471 }
1472 pd++;
1473 if (pd->pd_list != NULL)
1474 goto again;
1475 proclist_unlock_read();
1476
1477 if (where != NULL) {
1478 if (type == KERN_PROC)
1479 *sizep = (caddr_t)dp - where;
1480 else
1481 *sizep = dp2 - where;
1482 if (needed > *sizep)
1483 return (ENOMEM);
1484 } else {
1485 needed += KERN_PROCSLOP;
1486 *sizep = needed;
1487 }
1488 return (0);
1489 cleanup:
1490 proclist_unlock_read();
1491 return (error);
1492 }
1493
1494 /*
1495 * Fill in an eproc structure for the specified process.
1496 */
1497 void
1498 fill_eproc(p, ep)
1499 struct proc *p;
1500 struct eproc *ep;
1501 {
1502 struct tty *tp;
1503 struct lwp *l;
1504
1505 ep->e_paddr = p;
1506 ep->e_sess = p->p_session;
1507 ep->e_pcred = *p->p_cred;
1508 ep->e_ucred = *p->p_ucred;
1509 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1510 ep->e_vm.vm_rssize = 0;
1511 ep->e_vm.vm_tsize = 0;
1512 ep->e_vm.vm_dsize = 0;
1513 ep->e_vm.vm_ssize = 0;
1514 /* ep->e_vm.vm_pmap = XXX; */
1515 } else {
1516 struct vmspace *vm = p->p_vmspace;
1517
1518 ep->e_vm.vm_rssize = vm_resident_count(vm);
1519 ep->e_vm.vm_tsize = vm->vm_tsize;
1520 ep->e_vm.vm_dsize = vm->vm_dsize;
1521 ep->e_vm.vm_ssize = vm->vm_ssize;
1522
1523 /* Pick a "representative" LWP */
1524 l = proc_representative_lwp(p);
1525
1526 if (l->l_wmesg)
1527 strncpy(ep->e_wmesg, l->l_wmesg, WMESGLEN);
1528 }
1529 if (p->p_pptr)
1530 ep->e_ppid = p->p_pptr->p_pid;
1531 else
1532 ep->e_ppid = 0;
1533 ep->e_pgid = p->p_pgrp->pg_id;
1534 ep->e_sid = ep->e_sess->s_sid;
1535 ep->e_jobc = p->p_pgrp->pg_jobc;
1536 if ((p->p_flag & P_CONTROLT) &&
1537 (tp = ep->e_sess->s_ttyp)) {
1538 ep->e_tdev = tp->t_dev;
1539 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1540 ep->e_tsess = tp->t_session;
1541 } else
1542 ep->e_tdev = NODEV;
1543
1544 ep->e_xsize = ep->e_xrssize = 0;
1545 ep->e_xccount = ep->e_xswrss = 0;
1546 ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0;
1547 if (SESS_LEADER(p))
1548 ep->e_flag |= EPROC_SLEADER;
1549 strncpy(ep->e_login, ep->e_sess->s_login, MAXLOGNAME);
1550 }
1551
1552 /*
1553 * Fill in a kinfo_proc2 structure for the specified process.
1554 */
1555 static void
1556 fill_kproc2(p, ki)
1557 struct proc *p;
1558 struct kinfo_proc2 *ki;
1559 {
1560 struct tty *tp;
1561 struct lwp *l;
1562 memset(ki, 0, sizeof(*ki));
1563
1564 /* XXX NJWLWP
1565 * These are likely not what the caller was looking for.
1566 * The perils of playing with the kernel data structures...
1567 */
1568 ki->p_paddr = PTRTOINT64(p);
1569 ki->p_fd = PTRTOINT64(p->p_fd);
1570 ki->p_cwdi = PTRTOINT64(p->p_cwdi);
1571 ki->p_stats = PTRTOINT64(p->p_stats);
1572 ki->p_limit = PTRTOINT64(p->p_limit);
1573 ki->p_vmspace = PTRTOINT64(p->p_vmspace);
1574 ki->p_sigacts = PTRTOINT64(p->p_sigacts);
1575 ki->p_sess = PTRTOINT64(p->p_session);
1576 ki->p_tsess = 0; /* may be changed if controlling tty below */
1577 ki->p_ru = PTRTOINT64(p->p_ru);
1578
1579 ki->p_eflag = 0;
1580 ki->p_exitsig = p->p_exitsig;
1581 ki->p_flag = p->p_flag;
1582
1583 ki->p_pid = p->p_pid;
1584 if (p->p_pptr)
1585 ki->p_ppid = p->p_pptr->p_pid;
1586 else
1587 ki->p_ppid = 0;
1588 ki->p_sid = p->p_session->s_sid;
1589 ki->p__pgid = p->p_pgrp->pg_id;
1590
1591 ki->p_tpgid = NO_PID; /* may be changed if controlling tty below */
1592
1593 ki->p_uid = p->p_ucred->cr_uid;
1594 ki->p_ruid = p->p_cred->p_ruid;
1595 ki->p_gid = p->p_ucred->cr_gid;
1596 ki->p_rgid = p->p_cred->p_rgid;
1597
1598 memcpy(ki->p_groups, p->p_cred->pc_ucred->cr_groups,
1599 min(sizeof(ki->p_groups), sizeof(p->p_cred->pc_ucred->cr_groups)));
1600 ki->p_ngroups = p->p_cred->pc_ucred->cr_ngroups;
1601
1602 ki->p_jobc = p->p_pgrp->pg_jobc;
1603 if ((p->p_flag & P_CONTROLT) && (tp = p->p_session->s_ttyp)) {
1604 ki->p_tdev = tp->t_dev;
1605 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1606 ki->p_tsess = PTRTOINT64(tp->t_session);
1607 } else {
1608 ki->p_tdev = NODEV;
1609 }
1610
1611 ki->p_estcpu = p->p_estcpu;
1612 ki->p_rtime_sec = p->p_rtime.tv_sec;
1613 ki->p_rtime_usec = p->p_rtime.tv_usec;
1614 ki->p_cpticks = p->p_cpticks;
1615 ki->p_pctcpu = p->p_pctcpu;
1616
1617 ki->p_uticks = p->p_uticks;
1618 ki->p_sticks = p->p_sticks;
1619 ki->p_iticks = p->p_iticks;
1620
1621 ki->p_tracep = PTRTOINT64(p->p_tracep);
1622 ki->p_traceflag = p->p_traceflag;
1623
1624
1625 memcpy(&ki->p_siglist, &p->p_sigctx.ps_siglist, sizeof(ki_sigset_t));
1626 memcpy(&ki->p_sigmask, &p->p_sigctx.ps_sigmask, sizeof(ki_sigset_t));
1627 memcpy(&ki->p_sigignore, &p->p_sigctx.ps_sigignore,sizeof(ki_sigset_t));
1628 memcpy(&ki->p_sigcatch, &p->p_sigctx.ps_sigcatch, sizeof(ki_sigset_t));
1629
1630 ki->p_stat = p->p_stat;
1631 ki->p_nice = p->p_nice;
1632
1633 ki->p_xstat = p->p_xstat;
1634 ki->p_acflag = p->p_acflag;
1635
1636 strncpy(ki->p_comm, p->p_comm,
1637 min(sizeof(ki->p_comm), sizeof(p->p_comm)));
1638
1639 strncpy(ki->p_login, p->p_session->s_login, sizeof(ki->p_login));
1640
1641 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1642 ki->p_vm_rssize = 0;
1643 ki->p_vm_tsize = 0;
1644 ki->p_vm_dsize = 0;
1645 ki->p_vm_ssize = 0;
1646 } else {
1647 struct vmspace *vm = p->p_vmspace;
1648
1649 ki->p_vm_rssize = vm_resident_count(vm);
1650 ki->p_vm_tsize = vm->vm_tsize;
1651 ki->p_vm_dsize = vm->vm_dsize;
1652 ki->p_vm_ssize = vm->vm_ssize;
1653
1654 /* Pick a "representative" LWP */
1655 l = proc_representative_lwp(p);
1656 ki->p_forw = PTRTOINT64(l->l_forw);
1657 ki->p_back = PTRTOINT64(l->l_back);
1658 ki->p_addr = PTRTOINT64(l->l_addr);
1659 ki->p_swtime = l->l_swtime;
1660 ki->p_slptime = l->l_slptime;
1661 if (l->l_stat == LSONPROC) {
1662 KDASSERT(l->l_cpu != NULL);
1663 ki->p_schedflags = l->l_cpu->ci_schedstate.spc_flags;
1664 } else
1665 ki->p_schedflags = 0;
1666 ki->p_holdcnt = l->l_holdcnt;
1667 ki->p_priority = l->l_priority;
1668 ki->p_usrpri = l->l_usrpri;
1669 if (l->l_wmesg)
1670 strncpy(ki->p_wmesg, l->l_wmesg, sizeof(ki->p_wmesg));
1671 ki->p_wchan = PTRTOINT64(l->l_wchan);
1672
1673 }
1674
1675 if (p->p_session->s_ttyvp)
1676 ki->p_eflag |= EPROC_CTTY;
1677 if (SESS_LEADER(p))
1678 ki->p_eflag |= EPROC_SLEADER;
1679
1680 /* XXX Is this double check necessary? */
1681 if (P_ZOMBIE(p)) {
1682 ki->p_uvalid = 0;
1683 } else {
1684 ki->p_uvalid = 1;
1685
1686 ki->p_ustart_sec = p->p_stats->p_start.tv_sec;
1687 ki->p_ustart_usec = p->p_stats->p_start.tv_usec;
1688
1689 ki->p_uutime_sec = p->p_stats->p_ru.ru_utime.tv_sec;
1690 ki->p_uutime_usec = p->p_stats->p_ru.ru_utime.tv_usec;
1691 ki->p_ustime_sec = p->p_stats->p_ru.ru_stime.tv_sec;
1692 ki->p_ustime_usec = p->p_stats->p_ru.ru_stime.tv_usec;
1693
1694 ki->p_uru_maxrss = p->p_stats->p_ru.ru_maxrss;
1695 ki->p_uru_ixrss = p->p_stats->p_ru.ru_ixrss;
1696 ki->p_uru_idrss = p->p_stats->p_ru.ru_idrss;
1697 ki->p_uru_isrss = p->p_stats->p_ru.ru_isrss;
1698 ki->p_uru_minflt = p->p_stats->p_ru.ru_minflt;
1699 ki->p_uru_majflt = p->p_stats->p_ru.ru_majflt;
1700 ki->p_uru_nswap = p->p_stats->p_ru.ru_nswap;
1701 ki->p_uru_inblock = p->p_stats->p_ru.ru_inblock;
1702 ki->p_uru_oublock = p->p_stats->p_ru.ru_oublock;
1703 ki->p_uru_msgsnd = p->p_stats->p_ru.ru_msgsnd;
1704 ki->p_uru_msgrcv = p->p_stats->p_ru.ru_msgrcv;
1705 ki->p_uru_nsignals = p->p_stats->p_ru.ru_nsignals;
1706 ki->p_uru_nvcsw = p->p_stats->p_ru.ru_nvcsw;
1707 ki->p_uru_nivcsw = p->p_stats->p_ru.ru_nivcsw;
1708
1709 ki->p_uctime_sec = p->p_stats->p_cru.ru_utime.tv_sec +
1710 p->p_stats->p_cru.ru_stime.tv_sec;
1711 ki->p_uctime_usec = p->p_stats->p_cru.ru_utime.tv_usec +
1712 p->p_stats->p_cru.ru_stime.tv_usec;
1713 }
1714 #ifdef MULTIPROCESSOR
1715 if (p->p_cpu != NULL)
1716 ki->p_cpuid = p->p_cpu->ci_cpuid;
1717 else
1718 #endif
1719 ki->p_cpuid = KI_NOCPU;
1720
1721 }
1722
1723
1724 /*
1725 * Pick a LWP to represent the process for those operations which
1726 * want information about a "process" that is actually associated
1727 * with a LWP.
1728 */
1729 static struct lwp *proc_representative_lwp(p)
1730 struct proc *p;
1731 {
1732 struct lwp *l = NULL;
1733
1734 /* Trivial case: only one LWP */
1735 if (p->p_nrlwps == 1)
1736 return (LIST_FIRST(&p->p_lwps));
1737
1738 switch (p->p_stat) {
1739 case SSTOP:
1740 /* Pick the first stopped LWP */
1741 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1742 if (l->l_stat == LSSTOP)
1743 return (l);
1744 }
1745 /* NOTREACHED */
1746 break;
1747 case SACTIVE:
1748 /* Pick the first live LWP */
1749 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1750 if (l->l_stat == LSRUN ||
1751 l->l_stat == LSSLEEP ||
1752 l->l_stat == LSONPROC)
1753 return (l);
1754 }
1755 break;
1756 case SDEAD:
1757 case SZOMB:
1758 /* Doesn't really matter... */
1759 l = LIST_FIRST(&p->p_lwps);
1760 break;
1761 #ifdef DIAGNOSTIC
1762 case SIDL:
1763 /* We have more than one LWP and we're in SIDL?
1764 * How'd that happen?
1765 */
1766 panic("Too many LWPs (%d) in SIDL process %d (%s)",
1767 p->p_nrlwps, p->p_pid, p->p_comm);
1768 default:
1769 panic("Process %d (%s) in unknown state %d",
1770 p->p_pid, p->p_comm, p->p_stat);
1771 #endif
1772 }
1773
1774 panic("proc_representative_lwp: couldn't find a lwp for process"
1775 " %d (%s)", p->p_pid, p->p_comm);
1776 /* NOTREACHED */
1777 return NULL;
1778 }
1779
1780
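/*
 * Return the argument or environment strings (or just their count) of
 * the named process by reading its ps_strings structure and the
 * referenced vectors from the target's address space with uvm_io().
 */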
1781 int
1782 sysctl_procargs(name, namelen, where, sizep, up)
1783 int *name;
1784 u_int namelen;
1785 void *where;
1786 size_t *sizep;
1787 struct proc *up;
1788 {
1789 struct ps_strings pss;
1790 struct proc *p;
1791 size_t len, upper_bound, xlen;
1792 struct uio auio;
1793 struct iovec aiov;
1794 vaddr_t argv;
1795 pid_t pid;
1796 int nargv, type, error, i;
1797 char *arg;
1798 char *tmp;
1799
1800 if (namelen != 2)
1801 return (EINVAL);
1802 pid = name[0];
1803 type = name[1];
1804
1805 switch (type) {
1806 case KERN_PROC_ARGV:
1807 case KERN_PROC_NARGV:
1808 case KERN_PROC_ENV:
1809 case KERN_PROC_NENV:
1810 /* ok */
1811 break;
1812 default:
1813 return (EINVAL);
1814 }
1815
1816 /* check pid */
1817 if ((p = pfind(pid)) == NULL)
1818 return (EINVAL);
1819
1820 /* only root or the same user may look at the environment */
1821 if (type == KERN_PROC_ENV || type == KERN_PROC_NENV) {
1822 if (up->p_ucred->cr_uid != 0) {
1823 if (up->p_cred->p_ruid != p->p_cred->p_ruid ||
1824 up->p_cred->p_ruid != p->p_cred->p_svuid)
1825 return (EPERM);
1826 }
1827 }
1828
1829 if (sizep != NULL && where == NULL) {
1830 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV)
1831 *sizep = sizeof (int);
1832 else
1833 *sizep = ARG_MAX; /* XXX XXX XXX */
1834 return (0);
1835 }
1836 if (where == NULL || sizep == NULL)
1837 return (EINVAL);
1838
1839 /*
1840 * Zombies don't have a stack, so we can't read their psstrings.
1841 * System processes also don't have a user stack.
1842 */
1843 if (P_ZOMBIE(p) || (p->p_flag & P_SYSTEM) != 0)
1844 return (EINVAL);
1845
1846 /*
1847 * Lock the process down in memory.
1848 */
1849 /* XXXCDC: how should locking work here? */
1850 if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
1851 return (EFAULT);
1852
1853 p->p_vmspace->vm_refcnt++; /* XXX */
1854
1855 /*
1856 * Allocate a temporary buffer to hold the arguments.
1857 */
1858 arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
1859
1860 /*
1861 * Read in the ps_strings structure.
1862 */
1863 aiov.iov_base = &pss;
1864 aiov.iov_len = sizeof(pss);
1865 auio.uio_iov = &aiov;
1866 auio.uio_iovcnt = 1;
1867 auio.uio_offset = (vaddr_t)p->p_psstr;
1868 auio.uio_resid = sizeof(pss);
1869 auio.uio_segflg = UIO_SYSSPACE;
1870 auio.uio_rw = UIO_READ;
1871 auio.uio_procp = NULL;
1872 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1873 if (error)
1874 goto done;
1875
1876 if (type == KERN_PROC_ARGV || type == KERN_PROC_NARGV)
1877 memcpy(&nargv, (char *)&pss + p->p_psnargv, sizeof(nargv));
1878 else
1879 memcpy(&nargv, (char *)&pss + p->p_psnenv, sizeof(nargv));
1880 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV) {
1881 error = copyout(&nargv, where, sizeof(nargv));
1882 *sizep = sizeof(nargv);
1883 goto done;
1884 }
1885 /*
1886 * Now read the address of the argument vector.
1887 */
1888 switch (type) {
1889 case KERN_PROC_ARGV:
1890 /* XXX compat32 stuff here */
1891 memcpy(&tmp, (char *)&pss + p->p_psargv, sizeof(tmp));
1892 break;
1893 case KERN_PROC_ENV:
1894 memcpy(&tmp, (char *)&pss + p->p_psenv, sizeof(tmp));
1895 break;
1896 default:
1897 error = EINVAL;
goto done;
1898 }
1899 auio.uio_offset = (off_t)(long)tmp;
1900 aiov.iov_base = &argv;
1901 aiov.iov_len = sizeof(argv);
1902 auio.uio_iov = &aiov;
1903 auio.uio_iovcnt = 1;
1904 auio.uio_resid = sizeof(argv);
1905 auio.uio_segflg = UIO_SYSSPACE;
1906 auio.uio_rw = UIO_READ;
1907 auio.uio_procp = NULL;
1908 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1909 if (error)
1910 goto done;
1911
1912 /*
1913 * Now copy in the actual argument vector, one page at a time,
1914 * since we don't know how long the vector is (though, we do
1915 * know how many NUL-terminated strings are in the vector).
1916 */
1917 len = 0;
1918 upper_bound = *sizep;
1919 for (; nargv != 0 && len < upper_bound; len += xlen) {
1920 aiov.iov_base = arg;
1921 aiov.iov_len = PAGE_SIZE;
1922 auio.uio_iov = &aiov;
1923 auio.uio_iovcnt = 1;
1924 auio.uio_offset = argv + len;
1925 xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
1926 auio.uio_resid = xlen;
1927 auio.uio_segflg = UIO_SYSSPACE;
1928 auio.uio_rw = UIO_READ;
1929 auio.uio_procp = NULL;
1930 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1931 if (error)
1932 goto done;
1933
1934 for (i = 0; i < xlen && nargv != 0; i++) {
1935 if (arg[i] == '\0')
1936 nargv--; /* one full string */
1937 }
1938
1939 /* make sure we don't copyout past the end of the user's buffer */
1940 if (len + i > upper_bound)
1941 i = upper_bound - len;
1942
1943 error = copyout(arg, (char *)where + len, i);
1944 if (error)
1945 break;
1946
1947 if (nargv == 0) {
1948 len += i;
1949 break;
1950 }
1951 }
1952 *sizep = len;
1953
1954 done:
1955 uvmspace_free(p->p_vmspace);
1956
1957 free(arg, M_TEMP);
1958 return (error);
1959 }
1960
1961 #if NPTY > 0
1962 int pty_maxptys __P((int, int)); /* defined in kern/tty_pty.c */
1963
1964 /*
1965 * Validate parameters and get old / set new parameters
1966 * for pty sysctl function.
1967 */
1968 static int
1969 sysctl_pty(oldp, oldlenp, newp, newlen)
1970 void *oldp;
1971 size_t *oldlenp;
1972 void *newp;
1973 size_t newlen;
1974 {
1975 int error = 0;
1976 int oldmax = 0, newmax = 0;
1977
1978 /* get current value of maxptys */
1979 oldmax = pty_maxptys(0, 0);
1980
1981 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &oldmax, int)
1982
1983 if (!error && newp) {
1984 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
1985 SYSCTL_SCALAR_NEWPCOP_TYP(newp, &newmax, int)
1986
1987 if (newmax != pty_maxptys(newmax, (newp != NULL)))
1988 return (EINVAL);
1989
1990 }
1991
1992 return (error);
1993 }
1994 #endif /* NPTY > 0 */
1995