/* $NetBSD: kern_sysctl.c,v 1.86.2.3 2001/04/09 01:57:54 nathanw Exp $ */
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Mike Karels at Berkeley Software Design, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95
39 */
40
41 /*
42 * sysctl system call.
43 */
44
45 #include "opt_ddb.h"
46 #include "opt_insecure.h"
47 #include "opt_defcorename.h"
48 #include "opt_sysv.h"
49 #include "pty.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/buf.h>
55 #include <sys/device.h>
56 #include <sys/disklabel.h>
57 #include <sys/dkstat.h>
58 #include <sys/exec.h>
59 #include <sys/file.h>
60 #include <sys/ioctl.h>
61 #include <sys/malloc.h>
62 #include <sys/mount.h>
63 #include <sys/msgbuf.h>
64 #include <sys/pool.h>
65 #include <sys/lwp.h>
66 #include <sys/proc.h>
67 #include <sys/resource.h>
68 #include <sys/resourcevar.h>
69 #include <sys/syscallargs.h>
70 #include <sys/tty.h>
71 #include <sys/unistd.h>
72 #include <sys/vnode.h>
73 #define __SYSCTL_PRIVATE
74 #include <sys/sysctl.h>
75 #include <sys/lock.h>
76
77 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
78 #include <sys/ipc.h>
79 #endif
80 #ifdef SYSVMSG
81 #include <sys/msg.h>
82 #endif
83 #ifdef SYSVSEM
84 #include <sys/sem.h>
85 #endif
86 #ifdef SYSVSHM
87 #include <sys/shm.h>
88 #endif
89
90 #include <dev/cons.h>
91
92 #if defined(DDB)
93 #include <ddb/ddbvar.h>
94 #endif
95
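/*
 * Widen a kernel pointer to the fixed 64-bit pointer fields of the
 * exported kinfo_proc2 structure filled in below.
 */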
96 #define PTRTOINT64(foo) ((u_int64_t)(uintptr_t)(foo))
97
98 static int sysctl_file __P((void *, size_t *));
99 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
100 static int sysctl_sysvipc __P((int *, u_int, void *, size_t *));
101 #endif
102 static int sysctl_msgbuf __P((void *, size_t *));
103 static int sysctl_doeproc __P((int *, u_int, void *, size_t *));
104 #ifdef MULTIPROCESSOR
105 static int sysctl_docptime __P((void *, size_t *, void *));
106 static int sysctl_ncpus __P((void));
107 #endif
108 static void fill_kproc2 __P((struct proc *, struct kinfo_proc2 *));
109 static int sysctl_procargs __P((int *, u_int, void *, size_t *, struct proc *));
110 #if NPTY > 0
111 static int sysctl_pty __P((void *, size_t *, void *, size_t));
112 #endif
113
114 static struct lwp *proc_representative_lwp(struct proc *);
115
116 /*
117 * The `sysctl_memlock' is intended to keep too many processes from
118 * locking down memory by doing sysctls at once. Whether or not this
 * is really a good idea to worry about is probably a subject of some
120 * debate.
121 */
122 struct lock sysctl_memlock;
123
124 void
125 sysctl_init(void)
126 {
127
128 lockinit(&sysctl_memlock, PRIBIO|PCATCH, "sysctl", 0, 0);
129 }
130
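/*
 * sys___sysctl() is the kernel side of sysctl(3): copy in the MIB name,
 * dispatch on name[0] to the per-subtree handler, and hand it the
 * remaining names together with the old/new buffers.  Illustrative
 * only (userland, not part of this file), a query for kern.ostype is
 * roughly:
 *
 *	int mib[2] = { CTL_KERN, KERN_OSTYPE };
 *	char buf[256];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */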
131 int
132 sys___sysctl(l, v, retval)
133 struct lwp *l;
134 void *v;
135 register_t *retval;
136 {
137 struct sys___sysctl_args /* {
138 syscallarg(int *) name;
139 syscallarg(u_int) namelen;
140 syscallarg(void *) old;
141 syscallarg(size_t *) oldlenp;
142 syscallarg(void *) new;
143 syscallarg(size_t) newlen;
144 } */ *uap = v;
145 struct proc *p = l->l_proc;
146 int error;
147 size_t savelen = 0, oldlen = 0;
148 sysctlfn *fn;
149 int name[CTL_MAXNAME];
150 size_t *oldlenp;
151
152 /*
153 * all top-level sysctl names are non-terminal
154 */
155 if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
156 return (EINVAL);
157 error = copyin(SCARG(uap, name), &name,
158 SCARG(uap, namelen) * sizeof(int));
159 if (error)
160 return (error);
161
162 /*
163 * For all but CTL_PROC, must be root to change a value.
	 * For CTL_PROC, must be root, or owner of the proc (and not suid);
	 * this is checked in proc_sysctl() (once we know the target proc).
166 */
167 if (SCARG(uap, new) != NULL && name[0] != CTL_PROC &&
168 (error = suser(p->p_ucred, &p->p_acflag)))
169 return error;
170
171 switch (name[0]) {
172 case CTL_KERN:
173 fn = kern_sysctl;
174 break;
175 case CTL_HW:
176 fn = hw_sysctl;
177 break;
178 case CTL_VM:
179 fn = uvm_sysctl;
180 break;
181 case CTL_NET:
182 fn = net_sysctl;
183 break;
184 case CTL_VFS:
185 fn = vfs_sysctl;
186 break;
187 case CTL_MACHDEP:
188 fn = cpu_sysctl;
189 break;
190 #ifdef DEBUG
191 case CTL_DEBUG:
192 fn = debug_sysctl;
193 break;
194 #endif
195 #ifdef DDB
196 case CTL_DDB:
197 fn = ddb_sysctl;
198 break;
199 #endif
200 case CTL_PROC:
201 fn = proc_sysctl;
202 break;
203 default:
204 return (EOPNOTSUPP);
205 }
206
207 /*
208 * XXX Hey, we wire `old', but what about `new'?
209 */
210
211 oldlenp = SCARG(uap, oldlenp);
212 if (oldlenp) {
213 if ((error = copyin(oldlenp, &oldlen, sizeof(oldlen))))
214 return (error);
215 oldlenp = &oldlen;
216 }
217 if (SCARG(uap, old) != NULL) {
218 error = lockmgr(&sysctl_memlock, LK_EXCLUSIVE, NULL);
219 if (error)
220 return (error);
221 error = uvm_vslock(p, SCARG(uap, old), oldlen,
222 VM_PROT_READ|VM_PROT_WRITE);
223 if (error) {
224 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
225 return error;
226 }
227 savelen = oldlen;
228 }
229 error = (*fn)(name + 1, SCARG(uap, namelen) - 1, SCARG(uap, old),
230 oldlenp, SCARG(uap, new), SCARG(uap, newlen), p);
231 if (SCARG(uap, old) != NULL) {
232 uvm_vsunlock(p, SCARG(uap, old), savelen);
233 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
234 }
235 if (error)
236 return (error);
237 if (SCARG(uap, oldlenp))
238 error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
239 return (error);
240 }
241
242 /*
243 * Attributes stored in the kernel.
244 */
245 char hostname[MAXHOSTNAMELEN];
246 int hostnamelen;
247
248 char domainname[MAXHOSTNAMELEN];
249 int domainnamelen;
250
251 long hostid;
252
253 #ifdef INSECURE
254 int securelevel = -1;
255 #else
256 int securelevel = 0;
257 #endif
258
259 #ifndef DEFCORENAME
260 #define DEFCORENAME "%n.core"
261 #endif
262 char defcorename[MAXPATHLEN] = DEFCORENAME;
263 int defcorenamelen = sizeof(DEFCORENAME);
264
265 extern int kern_logsigexit;
266 extern fixpt_t ccpu;
267
268 #ifndef MULTIPROCESSOR
269 #define sysctl_ncpus() 1
270 #endif
271
272 #ifdef MULTIPROCESSOR
273
274 #ifndef CPU_INFO_FOREACH
275 #define CPU_INFO_ITERATOR int
276 #define CPU_INFO_FOREACH(cii, ci) cii = 0, ci = curcpu(); ci != NULL; ci = NULL
277 #endif
278
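/*
 * KERN_CP_TIME on a multiprocessor kernel: sum the per-CPU scheduler
 * tick counters into a single cp_time[] array and return it read-only.
 */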
279 static int
280 sysctl_docptime(oldp, oldlenp, newp)
281 void *oldp;
282 size_t *oldlenp;
283 void *newp;
284 {
285 u_int64_t cp_time[CPUSTATES];
286 int i;
287 struct cpu_info *ci;
288 CPU_INFO_ITERATOR cii;
289
290 for (i=0; i<CPUSTATES; i++)
291 cp_time[i] = 0;
292
293 for (CPU_INFO_FOREACH(cii, ci)) {
294 for (i=0; i<CPUSTATES; i++)
295 cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
296 }
297 return (sysctl_rdstruct(oldp, oldlenp, newp,
298 cp_time, sizeof(cp_time)));
299 }
300
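/*
 * Count the CPUs currently attached, for HW_NCPU.
 */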
301 static int
302 sysctl_ncpus(void)
303 {
304 struct cpu_info *ci;
305 CPU_INFO_ITERATOR cii;
306
307 int ncpus = 0;
308 for (CPU_INFO_FOREACH(cii, ci))
309 ncpus++;
310 return ncpus;
311 }
312
313 #endif
314
315 /*
316 * kernel related system variables.
317 */
318 int
319 kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
320 int *name;
321 u_int namelen;
322 void *oldp;
323 size_t *oldlenp;
324 void *newp;
325 size_t newlen;
326 struct proc *p;
327 {
328 int error, level, inthostid;
329 int old_autonicetime;
330 int old_vnodes;
331 dev_t consdev;
332
333 /* All sysctl names at this level, except for a few, are terminal. */
334 switch (name[0]) {
335 case KERN_PROC:
336 case KERN_PROC2:
337 case KERN_PROF:
338 case KERN_MBUF:
339 case KERN_PROC_ARGS:
340 case KERN_SYSVIPC_INFO:
341 /* Not terminal. */
342 break;
343 default:
344 if (namelen != 1)
345 return (ENOTDIR); /* overloaded */
346 }
347
348 switch (name[0]) {
349 case KERN_OSTYPE:
350 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
351 case KERN_OSRELEASE:
352 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
353 case KERN_OSREV:
354 return (sysctl_rdint(oldp, oldlenp, newp, __NetBSD_Version__));
355 case KERN_VERSION:
356 return (sysctl_rdstring(oldp, oldlenp, newp, version));
357 case KERN_MAXVNODES:
358 old_vnodes = desiredvnodes;
359 error = sysctl_int(oldp, oldlenp, newp, newlen, &desiredvnodes);
360 if (old_vnodes > desiredvnodes) {
361 desiredvnodes = old_vnodes;
362 return (EINVAL);
363 }
364 return (error);
365 case KERN_MAXPROC:
366 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc));
367 case KERN_MAXFILES:
368 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
369 case KERN_ARGMAX:
370 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
371 case KERN_SECURELVL:
372 level = securelevel;
373 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
374 newp == NULL)
375 return (error);
376 if (level < securelevel && p->p_pid != 1)
377 return (EPERM);
378 securelevel = level;
379 return (0);
380 case KERN_HOSTNAME:
381 error = sysctl_string(oldp, oldlenp, newp, newlen,
382 hostname, sizeof(hostname));
383 if (newp && !error)
384 hostnamelen = newlen;
385 return (error);
386 case KERN_DOMAINNAME:
387 error = sysctl_string(oldp, oldlenp, newp, newlen,
388 domainname, sizeof(domainname));
389 if (newp && !error)
390 domainnamelen = newlen;
391 return (error);
392 case KERN_HOSTID:
393 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
394 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
395 hostid = inthostid;
396 return (error);
397 case KERN_CLOCKRATE:
398 return (sysctl_clockrate(oldp, oldlenp));
399 case KERN_BOOTTIME:
400 return (sysctl_rdstruct(oldp, oldlenp, newp, &boottime,
401 sizeof(struct timeval)));
402 case KERN_VNODE:
403 return (sysctl_vnode(oldp, oldlenp, p));
404 case KERN_PROC:
405 case KERN_PROC2:
406 return (sysctl_doeproc(name, namelen, oldp, oldlenp));
407 case KERN_PROC_ARGS:
408 return (sysctl_procargs(name + 1, namelen - 1,
409 oldp, oldlenp, p));
410 case KERN_FILE:
411 return (sysctl_file(oldp, oldlenp));
412 #ifdef GPROF
413 case KERN_PROF:
414 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
415 newp, newlen));
416 #endif
417 case KERN_POSIX1:
418 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
419 case KERN_NGROUPS:
420 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
421 case KERN_JOB_CONTROL:
422 return (sysctl_rdint(oldp, oldlenp, newp, 1));
423 case KERN_SAVED_IDS:
424 #ifdef _POSIX_SAVED_IDS
425 return (sysctl_rdint(oldp, oldlenp, newp, 1));
426 #else
427 return (sysctl_rdint(oldp, oldlenp, newp, 0));
428 #endif
429 case KERN_MAXPARTITIONS:
430 return (sysctl_rdint(oldp, oldlenp, newp, MAXPARTITIONS));
431 case KERN_RAWPARTITION:
432 return (sysctl_rdint(oldp, oldlenp, newp, RAW_PART));
433 #ifdef NTP
434 case KERN_NTPTIME:
435 return (sysctl_ntptime(oldp, oldlenp));
436 #endif
437 case KERN_AUTONICETIME:
438 old_autonicetime = autonicetime;
439 error = sysctl_int(oldp, oldlenp, newp, newlen, &autonicetime);
440 if (autonicetime < 0)
441 autonicetime = old_autonicetime;
442 return (error);
443 case KERN_AUTONICEVAL:
444 error = sysctl_int(oldp, oldlenp, newp, newlen, &autoniceval);
445 if (autoniceval < PRIO_MIN)
446 autoniceval = PRIO_MIN;
447 if (autoniceval > PRIO_MAX)
448 autoniceval = PRIO_MAX;
449 return (error);
450 case KERN_RTC_OFFSET:
451 return (sysctl_rdint(oldp, oldlenp, newp, rtc_offset));
452 case KERN_ROOT_DEVICE:
453 return (sysctl_rdstring(oldp, oldlenp, newp,
454 root_device->dv_xname));
455 case KERN_MSGBUFSIZE:
456 /*
457 * deal with cases where the message buffer has
458 * become corrupted.
459 */
460 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
461 msgbufenabled = 0;
462 return (ENXIO);
463 }
464 return (sysctl_rdint(oldp, oldlenp, newp, msgbufp->msg_bufs));
465 case KERN_FSYNC:
466 return (sysctl_rdint(oldp, oldlenp, newp, 1));
467 case KERN_SYSVMSG:
468 #ifdef SYSVMSG
469 return (sysctl_rdint(oldp, oldlenp, newp, 1));
470 #else
471 return (sysctl_rdint(oldp, oldlenp, newp, 0));
472 #endif
473 case KERN_SYSVSEM:
474 #ifdef SYSVSEM
475 return (sysctl_rdint(oldp, oldlenp, newp, 1));
476 #else
477 return (sysctl_rdint(oldp, oldlenp, newp, 0));
478 #endif
479 case KERN_SYSVSHM:
480 #ifdef SYSVSHM
481 return (sysctl_rdint(oldp, oldlenp, newp, 1));
482 #else
483 return (sysctl_rdint(oldp, oldlenp, newp, 0));
484 #endif
485 case KERN_DEFCORENAME:
486 if (newp && newlen < 1)
487 return (EINVAL);
488 error = sysctl_string(oldp, oldlenp, newp, newlen,
489 defcorename, sizeof(defcorename));
490 if (newp && !error)
491 defcorenamelen = newlen;
492 return (error);
493 case KERN_SYNCHRONIZED_IO:
494 return (sysctl_rdint(oldp, oldlenp, newp, 1));
495 case KERN_IOV_MAX:
496 return (sysctl_rdint(oldp, oldlenp, newp, IOV_MAX));
497 case KERN_MBUF:
498 return (sysctl_dombuf(name + 1, namelen - 1, oldp, oldlenp,
499 newp, newlen));
500 case KERN_MAPPED_FILES:
501 return (sysctl_rdint(oldp, oldlenp, newp, 1));
502 case KERN_MEMLOCK:
503 return (sysctl_rdint(oldp, oldlenp, newp, 1));
504 case KERN_MEMLOCK_RANGE:
505 return (sysctl_rdint(oldp, oldlenp, newp, 1));
506 case KERN_MEMORY_PROTECTION:
507 return (sysctl_rdint(oldp, oldlenp, newp, 1));
508 case KERN_LOGIN_NAME_MAX:
509 return (sysctl_rdint(oldp, oldlenp, newp, LOGIN_NAME_MAX));
510 case KERN_LOGSIGEXIT:
511 return (sysctl_int(oldp, oldlenp, newp, newlen,
512 &kern_logsigexit));
513 case KERN_FSCALE:
514 return (sysctl_rdint(oldp, oldlenp, newp, FSCALE));
515 case KERN_CCPU:
516 return (sysctl_rdint(oldp, oldlenp, newp, ccpu));
517 case KERN_CP_TIME:
518 #ifndef MULTIPROCESSOR
519 return (sysctl_rdstruct(oldp, oldlenp, newp,
520 curcpu()->ci_schedstate.spc_cp_time,
521 sizeof(curcpu()->ci_schedstate.spc_cp_time)));
522 #else
523 return (sysctl_docptime(oldp, oldlenp, newp));
524 #endif
525 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
526 case KERN_SYSVIPC_INFO:
527 return (sysctl_sysvipc(name + 1, namelen - 1, oldp, oldlenp));
528 #endif
529 case KERN_MSGBUF:
530 return (sysctl_msgbuf(oldp, oldlenp));
531 case KERN_CONSDEV:
532 if (cn_tab != NULL)
533 consdev = cn_tab->cn_dev;
534 else
535 consdev = NODEV;
536 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
537 sizeof consdev));
538 #if NPTY > 0
539 case KERN_MAXPTYS:
540 return sysctl_pty(oldp, oldlenp, newp, newlen);
541 #endif
542 default:
543 return (EOPNOTSUPP);
544 }
545 /* NOTREACHED */
546 }
547
548 /*
549 * hardware related system variables.
550 */
551 int
552 hw_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
553 int *name;
554 u_int namelen;
555 void *oldp;
556 size_t *oldlenp;
557 void *newp;
558 size_t newlen;
559 struct proc *p;
560 {
561
562 /* all sysctl names at this level are terminal */
563 if (namelen != 1)
564 return (ENOTDIR); /* overloaded */
565
566 switch (name[0]) {
567 case HW_MACHINE:
568 return (sysctl_rdstring(oldp, oldlenp, newp, machine));
569 case HW_MACHINE_ARCH:
570 return (sysctl_rdstring(oldp, oldlenp, newp, machine_arch));
571 case HW_MODEL:
572 return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
573 case HW_NCPU:
574 return (sysctl_rdint(oldp, oldlenp, newp, sysctl_ncpus()));
575 case HW_BYTEORDER:
576 return (sysctl_rdint(oldp, oldlenp, newp, BYTE_ORDER));
577 case HW_PHYSMEM:
578 return (sysctl_rdint(oldp, oldlenp, newp, ctob(physmem)));
579 case HW_USERMEM:
580 return (sysctl_rdint(oldp, oldlenp, newp,
581 ctob(physmem - uvmexp.wired)));
582 case HW_PAGESIZE:
583 return (sysctl_rdint(oldp, oldlenp, newp, PAGE_SIZE));
584 case HW_ALIGNBYTES:
585 return (sysctl_rdint(oldp, oldlenp, newp, ALIGNBYTES));
586 case HW_CNMAGIC: {
587 char magic[CNS_LEN];
588 int error;
589
590 if (oldp)
591 cn_get_magic(magic, CNS_LEN);
592 error = sysctl_string(oldp, oldlenp, newp, newlen,
593 magic, sizeof(magic));
594 if (newp && !error) {
595 error = cn_set_magic(magic);
596 }
597 return (error);
598 }
599 default:
600 return (EOPNOTSUPP);
601 }
602 /* NOTREACHED */
603 }
604
605 #ifdef DEBUG
606 /*
607 * Debugging related system variables.
608 */
609 struct ctldebug debug0, debug1, debug2, debug3, debug4;
610 struct ctldebug debug5, debug6, debug7, debug8, debug9;
611 struct ctldebug debug10, debug11, debug12, debug13, debug14;
612 struct ctldebug debug15, debug16, debug17, debug18, debug19;
613 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
614 &debug0, &debug1, &debug2, &debug3, &debug4,
615 &debug5, &debug6, &debug7, &debug8, &debug9,
616 &debug10, &debug11, &debug12, &debug13, &debug14,
617 &debug15, &debug16, &debug17, &debug18, &debug19,
618 };
619 int
620 debug_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
621 int *name;
622 u_int namelen;
623 void *oldp;
624 size_t *oldlenp;
625 void *newp;
626 size_t newlen;
627 struct proc *p;
628 {
629 struct ctldebug *cdp;
630
631 /* all sysctl names at this level are name and field */
632 if (namelen != 2)
633 return (ENOTDIR); /* overloaded */
	/* Validate the index before using it to subscript debugvars[]. */
	if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
		return (EOPNOTSUPP);
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (EOPNOTSUPP);
637 switch (name[1]) {
638 case CTL_DEBUG_NAME:
639 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
640 case CTL_DEBUG_VALUE:
641 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
642 default:
643 return (EOPNOTSUPP);
644 }
645 /* NOTREACHED */
646 }
647 #endif /* DEBUG */
648
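/*
 * Per-process variables (the CTL_PROC subtree).  The name here is
 * { pid-or-PROC_CURPROC, variable, ... }; after the credential checks
 * below, this handles the core file name template (PROC_PID_CORENAME)
 * and the resource limits (PROC_PID_LIMIT) of the target process.
 */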
649 int
650 proc_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
651 int *name;
652 u_int namelen;
653 void *oldp;
654 size_t *oldlenp;
655 void *newp;
656 size_t newlen;
657 struct proc *p;
658 {
659 struct proc *ptmp = NULL;
660 const struct proclist_desc *pd;
661 int error = 0;
662 struct rlimit alim;
663 struct plimit *newplim;
664 char *tmps = NULL;
665 int i, curlen, len;
666
667 if (namelen < 2)
668 return EINVAL;
669
670 if (name[0] == PROC_CURPROC) {
671 ptmp = p;
672 } else {
673 proclist_lock_read();
674 for (pd = proclists; pd->pd_list != NULL; pd++) {
675 for (ptmp = LIST_FIRST(pd->pd_list); ptmp != NULL;
676 ptmp = LIST_NEXT(ptmp, p_list)) {
677 /* Skip embryonic processes. */
678 if (ptmp->p_stat == SIDL)
679 continue;
680 if (ptmp->p_pid == (pid_t)name[0])
681 break;
682 }
683 if (ptmp != NULL)
684 break;
685 }
686 proclist_unlock_read();
687 if (ptmp == NULL)
688 return(ESRCH);
689 if (p->p_ucred->cr_uid != 0) {
			if (p->p_cred->p_ruid != ptmp->p_cred->p_ruid ||
			    p->p_cred->p_ruid != ptmp->p_cred->p_svuid)
692 return EPERM;
693 if (ptmp->p_cred->p_rgid != ptmp->p_cred->p_svgid)
694 return EPERM; /* sgid proc */
695 for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
696 if (p->p_ucred->cr_groups[i] ==
697 ptmp->p_cred->p_rgid)
698 break;
699 }
700 if (i == p->p_ucred->cr_ngroups)
701 return EPERM;
702 }
703 }
704 if (name[1] == PROC_PID_CORENAME) {
705 if (namelen != 2)
706 return EINVAL;
707 /*
708 * Can't use sysctl_string() here because we may malloc a new
709 * area during the process, so we have to do it by hand.
710 */
711 curlen = strlen(ptmp->p_limit->pl_corename) + 1;
712 if (oldlenp && *oldlenp < curlen) {
713 if (!oldp)
714 *oldlenp = curlen;
715 return (ENOMEM);
716 }
717 if (newp) {
718 if (securelevel > 2)
719 return EPERM;
720 if (newlen > MAXPATHLEN)
721 return ENAMETOOLONG;
722 tmps = malloc(newlen + 1, M_TEMP, M_WAITOK);
723 if (tmps == NULL)
724 return ENOMEM;
725 error = copyin(newp, tmps, newlen + 1);
726 tmps[newlen] = '\0';
727 if (error)
728 goto cleanup;
			/* Must be either 'core' or end with '.core' */
730 if (newlen < 4) { /* c.o.r.e */
731 error = EINVAL;
732 goto cleanup;
733 }
734 len = newlen - 4;
735 if (len > 0) {
736 if (tmps[len - 1] != '.' &&
737 tmps[len - 1] != '/') {
738 error = EINVAL;
739 goto cleanup;
740 }
741 }
742 if (strcmp(&tmps[len], "core") != 0) {
743 error = EINVAL;
744 goto cleanup;
745 }
746 }
747 if (oldp && oldlenp) {
748 *oldlenp = curlen;
749 error = copyout(ptmp->p_limit->pl_corename, oldp,
750 curlen);
751 }
752 if (newp && error == 0) {
753 /* if the 2 strings are identical, don't limcopy() */
754 if (strcmp(tmps, ptmp->p_limit->pl_corename) == 0) {
755 error = 0;
756 goto cleanup;
757 }
758 if (ptmp->p_limit->p_refcnt > 1 &&
759 (ptmp->p_limit->p_lflags & PL_SHAREMOD) == 0) {
760 newplim = limcopy(ptmp->p_limit);
761 limfree(ptmp->p_limit);
762 ptmp->p_limit = newplim;
763 } else if (ptmp->p_limit->pl_corename != defcorename) {
764 free(ptmp->p_limit->pl_corename, M_TEMP);
765 }
766 ptmp->p_limit->pl_corename = tmps;
767 return (0);
768 }
769 cleanup:
770 if (tmps)
771 free(tmps, M_TEMP);
772 return (error);
773 }
774 if (name[1] == PROC_PID_LIMIT) {
		if (namelen != 4 || name[2] < 1 ||
		    name[2] >= PROC_PID_LIMIT_MAXID)
776 return EINVAL;
777 memcpy(&alim, &ptmp->p_rlimit[name[2] - 1], sizeof(alim));
778 if (name[3] == PROC_PID_LIMIT_TYPE_HARD)
779 error = sysctl_quad(oldp, oldlenp, newp, newlen,
780 &alim.rlim_max);
781 else if (name[3] == PROC_PID_LIMIT_TYPE_SOFT)
782 error = sysctl_quad(oldp, oldlenp, newp, newlen,
783 &alim.rlim_cur);
784 else
785 error = EINVAL;
786
787 if (error)
788 return error;
789
790 if (newp)
791 error = dosetrlimit(ptmp, p->p_cred,
792 name[2] - 1, &alim);
793 return error;
794 }
795 return (EINVAL);
796 }
797
798 /*
799 * Convenience macros.
800 */
801
802 #define SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, len) \
803 if (oldlenp) { \
804 if (!oldp) \
805 *oldlenp = len; \
806 else { \
807 if (*oldlenp < len) \
808 return(ENOMEM); \
809 *oldlenp = len; \
810 error = copyout((caddr_t)valp, oldp, len); \
811 } \
812 }
813
814 #define SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, typ) \
815 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, sizeof(typ))
816
817 #define SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len) \
818 if (newp && newlen != len) \
819 return (EINVAL);
820
821 #define SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, typ) \
822 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, sizeof(typ))
823
824 #define SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, len) \
825 if (error == 0 && newp) \
826 error = copyin(newp, valp, len);
827
828 #define SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, typ) \
829 SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, sizeof(typ))
830
831 #define SYSCTL_STRING_CORE(oldp, oldlenp, str) \
832 if (oldlenp) { \
833 len = strlen(str) + 1; \
834 if (!oldp) \
835 *oldlenp = len; \
836 else { \
837 if (*oldlenp < len) { \
838 err2 = ENOMEM; \
839 len = *oldlenp; \
840 } else \
841 *oldlenp = len; \
842 error = copyout(str, oldp, len);\
843 if (error == 0) \
844 error = err2; \
845 } \
846 }
847
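/*
 * Taken together, the macros above give every scalar handler the same
 * shape.  For example, sysctl_int() below expands to roughly:
 *
 *	if (newp && newlen != sizeof(int))
 *		return (EINVAL);
 *	if (oldlenp) {
 *		if (!oldp)
 *			*oldlenp = sizeof(int);
 *		else {
 *			if (*oldlenp < sizeof(int))
 *				return (ENOMEM);
 *			*oldlenp = sizeof(int);
 *			error = copyout((caddr_t)valp, oldp, sizeof(int));
 *		}
 *	}
 *	if (error == 0 && newp)
 *		error = copyin(newp, valp, sizeof(int));
 */
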
848 /*
849 * Validate parameters and get old / set new parameters
850 * for an integer-valued sysctl function.
851 */
852 int
853 sysctl_int(oldp, oldlenp, newp, newlen, valp)
854 void *oldp;
855 size_t *oldlenp;
856 void *newp;
857 size_t newlen;
858 int *valp;
859 {
860 int error = 0;
861
862 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
863 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, int)
864 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, int)
865
866 return (error);
867 }
868
869
870 /*
871 * As above, but read-only.
872 */
873 int
874 sysctl_rdint(oldp, oldlenp, newp, val)
875 void *oldp;
876 size_t *oldlenp;
877 void *newp;
878 int val;
879 {
880 int error = 0;
881
882 if (newp)
883 return (EPERM);
884
885 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, int)
886
887 return (error);
888 }
889
890 /*
891 * Validate parameters and get old / set new parameters
 * for a quad-valued sysctl function.
893 */
894 int
895 sysctl_quad(oldp, oldlenp, newp, newlen, valp)
896 void *oldp;
897 size_t *oldlenp;
898 void *newp;
899 size_t newlen;
900 quad_t *valp;
901 {
902 int error = 0;
903
904 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, quad_t)
905 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, quad_t)
906 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, quad_t)
907
908 return (error);
909 }
910
911 /*
912 * As above, but read-only.
913 */
914 int
915 sysctl_rdquad(oldp, oldlenp, newp, val)
916 void *oldp;
917 size_t *oldlenp;
918 void *newp;
919 quad_t val;
920 {
921 int error = 0;
922
923 if (newp)
924 return (EPERM);
925
926 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, quad_t)
927
928 return (error);
929 }
930
931 /*
932 * Validate parameters and get old / set new parameters
933 * for a string-valued sysctl function.
934 */
935 int
936 sysctl_string(oldp, oldlenp, newp, newlen, str, maxlen)
937 void *oldp;
938 size_t *oldlenp;
939 void *newp;
940 size_t newlen;
941 char *str;
942 int maxlen;
943 {
944 int len, error = 0, err2 = 0;
945
946 if (newp && newlen >= maxlen)
947 return (EINVAL);
948
949 SYSCTL_STRING_CORE(oldp, oldlenp, str);
950
951 if (error == 0 && newp) {
952 error = copyin(newp, str, newlen);
953 str[newlen] = 0;
954 }
955 return (error);
956 }
957
958 /*
959 * As above, but read-only.
960 */
961 int
962 sysctl_rdstring(oldp, oldlenp, newp, str)
963 void *oldp;
964 size_t *oldlenp;
965 void *newp;
966 const char *str;
967 {
968 int len, error = 0, err2 = 0;
969
970 if (newp)
971 return (EPERM);
972
973 SYSCTL_STRING_CORE(oldp, oldlenp, str);
974
975 return (error);
976 }
977
978 /*
979 * Validate parameters and get old / set new parameters
980 * for a structure oriented sysctl function.
981 */
982 int
983 sysctl_struct(oldp, oldlenp, newp, newlen, sp, len)
984 void *oldp;
985 size_t *oldlenp;
986 void *newp;
987 size_t newlen;
988 void *sp;
989 int len;
990 {
991 int error = 0;
992
993 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len)
994 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
995 SYSCTL_SCALAR_NEWPCOP_LEN(newp, sp, len)
996
997 return (error);
998 }
999
1000 /*
1001 * Validate parameters and get old parameters
1002 * for a structure oriented sysctl function.
1003 */
1004 int
1005 sysctl_rdstruct(oldp, oldlenp, newp, sp, len)
1006 void *oldp;
1007 size_t *oldlenp;
1008 void *newp;
1009 const void *sp;
1010 int len;
1011 {
1012 int error = 0;
1013
1014 if (newp)
1015 return (EPERM);
1016
1017 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1018
1019 return (error);
1020 }
1021
1022 /*
1023 * As above, but can return a truncated result.
1024 */
1025 int
1026 sysctl_rdminstruct(oldp, oldlenp, newp, sp, len)
1027 void *oldp;
1028 size_t *oldlenp;
1029 void *newp;
1030 const void *sp;
1031 int len;
1032 {
1033 int error = 0;
1034
1035 if (newp)
1036 return (EPERM);
1037
1038 len = min(*oldlenp, len);
1039 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1040
1041 return (error);
1042 }
1043
1044 /*
1045 * Get file structures.
1046 */
1047 static int
1048 sysctl_file(vwhere, sizep)
1049 void *vwhere;
1050 size_t *sizep;
1051 {
1052 int buflen, error;
1053 struct file *fp;
1054 char *start, *where;
1055
1056 start = where = vwhere;
1057 buflen = *sizep;
1058 if (where == NULL) {
1059 /*
1060 * overestimate by 10 files
1061 */
1062 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
1063 return (0);
1064 }
1065
1066 /*
1067 * first copyout filehead
1068 */
1069 if (buflen < sizeof(filehead)) {
1070 *sizep = 0;
1071 return (0);
1072 }
1073 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1074 if (error)
1075 return (error);
1076 buflen -= sizeof(filehead);
1077 where += sizeof(filehead);
1078
1079 /*
1080 * followed by an array of file structures
1081 */
1082 for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) {
1083 if (buflen < sizeof(struct file)) {
1084 *sizep = where - start;
1085 return (ENOMEM);
1086 }
1087 error = copyout((caddr_t)fp, where, sizeof(struct file));
1088 if (error)
1089 return (error);
1090 buflen -= sizeof(struct file);
1091 where += sizeof(struct file);
1092 }
1093 *sizep = where - start;
1094 return (0);
1095 }
1096
1097 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
1098 #define FILL_PERM(src, dst) do { \
1099 (dst)._key = (src)._key; \
1100 (dst).uid = (src).uid; \
1101 (dst).gid = (src).gid; \
1102 (dst).cuid = (src).cuid; \
1103 (dst).cgid = (src).cgid; \
1104 (dst).mode = (src).mode; \
1105 (dst)._seq = (src)._seq; \
} while (0)
1107 #define FILL_MSG(src, dst) do { \
1108 FILL_PERM((src).msg_perm, (dst).msg_perm); \
1109 (dst).msg_qnum = (src).msg_qnum; \
1110 (dst).msg_qbytes = (src).msg_qbytes; \
1111 (dst)._msg_cbytes = (src)._msg_cbytes; \
1112 (dst).msg_lspid = (src).msg_lspid; \
1113 (dst).msg_lrpid = (src).msg_lrpid; \
1114 (dst).msg_stime = (src).msg_stime; \
1115 (dst).msg_rtime = (src).msg_rtime; \
1116 (dst).msg_ctime = (src).msg_ctime; \
1117 } while (0)
1118 #define FILL_SEM(src, dst) do { \
1119 FILL_PERM((src).sem_perm, (dst).sem_perm); \
1120 (dst).sem_nsems = (src).sem_nsems; \
1121 (dst).sem_otime = (src).sem_otime; \
1122 (dst).sem_ctime = (src).sem_ctime; \
1123 } while (0)
1124 #define FILL_SHM(src, dst) do { \
1125 FILL_PERM((src).shm_perm, (dst).shm_perm); \
1126 (dst).shm_segsz = (src).shm_segsz; \
1127 (dst).shm_lpid = (src).shm_lpid; \
1128 (dst).shm_cpid = (src).shm_cpid; \
1129 (dst).shm_atime = (src).shm_atime; \
1130 (dst).shm_dtime = (src).shm_dtime; \
1131 (dst).shm_ctime = (src).shm_ctime; \
1132 (dst).shm_nattch = (src).shm_nattch; \
1133 } while (0)
1134
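/*
 * KERN_SYSVIPC_INFO: export the System V IPC limits and the state of
 * every identifier.  The FILL_* macros above copy the kernel-internal
 * structures into the fixed-layout *_sysctl_info records handed to
 * userland.  The reply is the info structure (rounded up to a 64-bit
 * boundary) followed by an array of identifier records.
 */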
1135 static int
1136 sysctl_sysvipc(name, namelen, where, sizep)
1137 int *name;
1138 u_int namelen;
1139 void *where;
1140 size_t *sizep;
1141 {
1142 #ifdef SYSVMSG
1143 struct msg_sysctl_info *msgsi;
1144 #endif
1145 #ifdef SYSVSEM
1146 struct sem_sysctl_info *semsi;
1147 #endif
1148 #ifdef SYSVSHM
1149 struct shm_sysctl_info *shmsi;
1150 #endif
1151 size_t infosize, dssize, tsize, buflen;
1152 void *buf = NULL, *buf2;
1153 char *start;
1154 int32_t nds;
1155 int i, error, ret;
1156
1157 if (namelen != 1)
1158 return (EINVAL);
1159
1160 start = where;
1161 buflen = *sizep;
1162
1163 switch (*name) {
1164 case KERN_SYSVIPC_MSG_INFO:
1165 #ifdef SYSVMSG
1166 infosize = sizeof(msgsi->msginfo);
1167 nds = msginfo.msgmni;
1168 dssize = sizeof(msgsi->msgids[0]);
1169 break;
1170 #else
1171 return (EINVAL);
1172 #endif
1173 case KERN_SYSVIPC_SEM_INFO:
1174 #ifdef SYSVSEM
1175 infosize = sizeof(semsi->seminfo);
1176 nds = seminfo.semmni;
1177 dssize = sizeof(semsi->semids[0]);
1178 break;
1179 #else
1180 return (EINVAL);
1181 #endif
1182 case KERN_SYSVIPC_SHM_INFO:
1183 #ifdef SYSVSHM
1184 infosize = sizeof(shmsi->shminfo);
1185 nds = shminfo.shmmni;
1186 dssize = sizeof(shmsi->shmids[0]);
1187 break;
1188 #else
1189 return (EINVAL);
1190 #endif
1191 default:
1192 return (EINVAL);
1193 }
1194 /*
1195 * Round infosize to 64 bit boundary if requesting more than just
1196 * the info structure or getting the total data size.
1197 */
1198 if (where == NULL || *sizep > infosize)
1199 infosize = ((infosize + 7) / 8) * 8;
1200 tsize = infosize + nds * dssize;
1201
1202 /* Return just the total size required. */
1203 if (where == NULL) {
1204 *sizep = tsize;
1205 return (0);
1206 }
1207
1208 /* Not enough room for even the info struct. */
1209 if (buflen < infosize) {
1210 *sizep = 0;
1211 return (ENOMEM);
1212 }
1213 buf = malloc(min(tsize, buflen), M_TEMP, M_WAITOK);
1214 memset(buf, 0, min(tsize, buflen));
1215
1216 switch (*name) {
1217 #ifdef SYSVMSG
1218 case KERN_SYSVIPC_MSG_INFO:
1219 msgsi = (struct msg_sysctl_info *)buf;
1220 buf2 = &msgsi->msgids[0];
1221 msgsi->msginfo = msginfo;
1222 break;
1223 #endif
1224 #ifdef SYSVSEM
1225 case KERN_SYSVIPC_SEM_INFO:
1226 semsi = (struct sem_sysctl_info *)buf;
1227 buf2 = &semsi->semids[0];
1228 semsi->seminfo = seminfo;
1229 break;
1230 #endif
1231 #ifdef SYSVSHM
1232 case KERN_SYSVIPC_SHM_INFO:
1233 shmsi = (struct shm_sysctl_info *)buf;
1234 buf2 = &shmsi->shmids[0];
1235 shmsi->shminfo = shminfo;
1236 break;
1237 #endif
1238 }
1239 buflen -= infosize;
1240
1241 ret = 0;
1242 if (buflen > 0) {
1243 /* Fill in the IPC data structures. */
1244 for (i = 0; i < nds; i++) {
1245 if (buflen < dssize) {
1246 ret = ENOMEM;
1247 break;
1248 }
1249 switch (*name) {
1250 #ifdef SYSVMSG
1251 case KERN_SYSVIPC_MSG_INFO:
1252 FILL_MSG(msqids[i], msgsi->msgids[i]);
1253 break;
1254 #endif
1255 #ifdef SYSVSEM
1256 case KERN_SYSVIPC_SEM_INFO:
1257 FILL_SEM(sema[i], semsi->semids[i]);
1258 break;
1259 #endif
1260 #ifdef SYSVSHM
1261 case KERN_SYSVIPC_SHM_INFO:
1262 FILL_SHM(shmsegs[i], shmsi->shmids[i]);
1263 break;
1264 #endif
1265 }
1266 buflen -= dssize;
1267 }
1268 }
1269 *sizep -= buflen;
1270 error = copyout(buf, start, *sizep);
1271 /* If copyout succeeded, use return code set earlier. */
1272 if (error == 0)
1273 error = ret;
1274 if (buf)
1275 free(buf, M_TEMP);
1276 return (error);
1277 }
1278 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */
1279
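/*
 * KERN_MSGBUF: copy out the kernel message buffer.  The buffer is
 * circular, so copy from the current write index to the end first and
 * then from the start of the buffer, giving the caller the oldest
 * data first.
 */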
1280 static int
1281 sysctl_msgbuf(vwhere, sizep)
1282 void *vwhere;
1283 size_t *sizep;
1284 {
1285 char *where = vwhere;
1286 size_t len, maxlen = *sizep;
1287 long pos;
1288 int error;
1289
1290 /*
1291 * deal with cases where the message buffer has
1292 * become corrupted.
1293 */
1294 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
1295 msgbufenabled = 0;
1296 return (ENXIO);
1297 }
1298
1299 if (where == NULL) {
1300 /* always return full buffer size */
1301 *sizep = msgbufp->msg_bufs;
1302 return (0);
1303 }
1304
1305 error = 0;
1306 maxlen = min(msgbufp->msg_bufs, maxlen);
1307 pos = msgbufp->msg_bufx;
1308 while (maxlen > 0) {
1309 len = pos == 0 ? msgbufp->msg_bufx : msgbufp->msg_bufs - msgbufp->msg_bufx;
1310 len = min(len, maxlen);
1311 if (len == 0)
1312 break;
1313 error = copyout(&msgbufp->msg_bufc[pos], where, len);
1314 if (error)
1315 break;
1316 where += len;
1317 maxlen -= len;
1318 pos = 0;
1319 }
1320 return (error);
1321 }
1322
1323 /*
 * try overestimating by 5 procs
1325 */
1326 #define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
1327
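/*
 * KERN_PROC and KERN_PROC2: walk the proclists and copy out one
 * kinfo_proc or kinfo_proc2 record per process matching the requested
 * filter.  Illustrative only: a userland KERN_PROC2 lookup of a single
 * pid would use a MIB like
 *
 *	{ CTL_KERN, KERN_PROC2, KERN_PROC_PID, pid,
 *	    sizeof(struct kinfo_proc2), 1 }
 *
 * where the last two names tell the kernel the record size and count
 * the caller expects, so only min(sizeof(kproc2), elem_size) bytes of
 * each record are copied out.
 */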
1328 static int
1329 sysctl_doeproc(name, namelen, vwhere, sizep)
1330 int *name;
1331 u_int namelen;
1332 void *vwhere;
1333 size_t *sizep;
1334 {
1335 struct eproc eproc;
1336 struct kinfo_proc2 kproc2;
1337 struct kinfo_proc *dp;
1338 struct proc *p;
1339 const struct proclist_desc *pd;
1340 char *where, *dp2;
1341 int type, op, arg, elem_size, elem_count;
1342 int buflen, needed, error;
1343
1344 dp = vwhere;
1345 dp2 = where = vwhere;
1346 buflen = where != NULL ? *sizep : 0;
1347 error = needed = 0;
1348 type = name[0];
1349
1350 if (type == KERN_PROC) {
1351 if (namelen != 3 && !(namelen == 2 && name[1] == KERN_PROC_ALL))
1352 return (EINVAL);
1353 op = name[1];
1354 if (op != KERN_PROC_ALL)
1355 arg = name[2];
1356 } else {
1357 if (namelen != 5)
1358 return (EINVAL);
1359 op = name[1];
1360 arg = name[2];
1361 elem_size = name[3];
1362 elem_count = name[4];
1363 }
1364
1365 proclist_lock_read();
1366
1367 pd = proclists;
1368 again:
1369 for (p = LIST_FIRST(pd->pd_list); p != NULL; p = LIST_NEXT(p, p_list)) {
1370 /*
1371 * Skip embryonic processes.
1372 */
1373 if (p->p_stat == SIDL)
1374 continue;
1375 /*
1376 * TODO - make more efficient (see notes below).
1377 * do by session.
1378 */
1379 switch (op) {
1380
1381 case KERN_PROC_PID:
1382 /* could do this with just a lookup */
1383 if (p->p_pid != (pid_t)arg)
1384 continue;
1385 break;
1386
1387 case KERN_PROC_PGRP:
1388 /* could do this by traversing pgrp */
1389 if (p->p_pgrp->pg_id != (pid_t)arg)
1390 continue;
1391 break;
1392
1393 case KERN_PROC_SESSION:
1394 if (p->p_session->s_sid != (pid_t)arg)
1395 continue;
1396 break;
1397
1398 case KERN_PROC_TTY:
1399 if (arg == KERN_PROC_TTY_REVOKE) {
1400 if ((p->p_flag & P_CONTROLT) == 0 ||
1401 p->p_session->s_ttyp == NULL ||
1402 p->p_session->s_ttyvp != NULL)
1403 continue;
1404 } else if ((p->p_flag & P_CONTROLT) == 0 ||
1405 p->p_session->s_ttyp == NULL) {
1406 if ((dev_t)arg != KERN_PROC_TTY_NODEV)
1407 continue;
1408 } else if (p->p_session->s_ttyp->t_dev != (dev_t)arg)
1409 continue;
1410 break;
1411
1412 case KERN_PROC_UID:
1413 if (p->p_ucred->cr_uid != (uid_t)arg)
1414 continue;
1415 break;
1416
1417 case KERN_PROC_RUID:
1418 if (p->p_cred->p_ruid != (uid_t)arg)
1419 continue;
1420 break;
1421
1422 case KERN_PROC_GID:
1423 if (p->p_ucred->cr_gid != (uid_t)arg)
1424 continue;
1425 break;
1426
1427 case KERN_PROC_RGID:
1428 if (p->p_cred->p_rgid != (uid_t)arg)
1429 continue;
1430 break;
1431
1432 case KERN_PROC_ALL:
1433 /* allow everything */
1434 break;
1435
1436 default:
1437 error = EINVAL;
1438 goto cleanup;
1439 }
1440 if (type == KERN_PROC) {
1441 if (buflen >= sizeof(struct kinfo_proc)) {
1442 fill_eproc(p, &eproc);
1443 error = copyout((caddr_t)p, &dp->kp_proc,
1444 sizeof(struct proc));
1445 if (error)
1446 goto cleanup;
1447 error = copyout((caddr_t)&eproc, &dp->kp_eproc,
1448 sizeof(eproc));
1449 if (error)
1450 goto cleanup;
1451 dp++;
1452 buflen -= sizeof(struct kinfo_proc);
1453 }
1454 needed += sizeof(struct kinfo_proc);
1455 } else { /* KERN_PROC2 */
1456 if (buflen >= elem_size && elem_count > 0) {
1457 fill_kproc2(p, &kproc2);
1458 /*
1459 * Copy out elem_size, but not larger than
1460 * the size of a struct kinfo_proc2.
1461 */
1462 error = copyout(&kproc2, dp2,
1463 min(sizeof(kproc2), elem_size));
1464 if (error)
1465 goto cleanup;
1466 dp2 += elem_size;
1467 buflen -= elem_size;
1468 elem_count--;
1469 }
1470 needed += elem_size;
1471 }
1472 }
1473 pd++;
1474 if (pd->pd_list != NULL)
1475 goto again;
1476 proclist_unlock_read();
1477
1478 if (where != NULL) {
1479 if (type == KERN_PROC)
1480 *sizep = (caddr_t)dp - where;
1481 else
1482 *sizep = dp2 - where;
1483 if (needed > *sizep)
1484 return (ENOMEM);
1485 } else {
1486 needed += KERN_PROCSLOP;
1487 *sizep = needed;
1488 }
1489 return (0);
1490 cleanup:
1491 proclist_unlock_read();
1492 return (error);
1493 }
1494
1495 /*
1496 * Fill in an eproc structure for the specified process.
1497 */
1498 void
1499 fill_eproc(p, ep)
1500 struct proc *p;
1501 struct eproc *ep;
1502 {
1503 struct tty *tp;
1504 struct lwp *l;
1505
1506 ep->e_paddr = p;
1507 ep->e_sess = p->p_session;
1508 ep->e_pcred = *p->p_cred;
1509 ep->e_ucred = *p->p_ucred;
1510 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1511 ep->e_vm.vm_rssize = 0;
1512 ep->e_vm.vm_tsize = 0;
1513 ep->e_vm.vm_dsize = 0;
1514 ep->e_vm.vm_ssize = 0;
1515 /* ep->e_vm.vm_pmap = XXX; */
1516 } else {
1517 struct vmspace *vm = p->p_vmspace;
1518
1519 ep->e_vm.vm_rssize = vm_resident_count(vm);
1520 ep->e_vm.vm_tsize = vm->vm_tsize;
1521 ep->e_vm.vm_dsize = vm->vm_dsize;
1522 ep->e_vm.vm_ssize = vm->vm_ssize;
1523
1524 /* Pick a "representative" LWP */
1525 l = proc_representative_lwp(p);
1526
1527 if (l->l_wmesg)
1528 strncpy(ep->e_wmesg, l->l_wmesg, WMESGLEN);
1529 }
1530 if (p->p_pptr)
1531 ep->e_ppid = p->p_pptr->p_pid;
1532 else
1533 ep->e_ppid = 0;
1534 ep->e_pgid = p->p_pgrp->pg_id;
1535 ep->e_sid = ep->e_sess->s_sid;
1536 ep->e_jobc = p->p_pgrp->pg_jobc;
1537 if ((p->p_flag & P_CONTROLT) &&
1538 (tp = ep->e_sess->s_ttyp)) {
1539 ep->e_tdev = tp->t_dev;
1540 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1541 ep->e_tsess = tp->t_session;
1542 } else
1543 ep->e_tdev = NODEV;
1544
1545 ep->e_xsize = ep->e_xrssize = 0;
1546 ep->e_xccount = ep->e_xswrss = 0;
1547 ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0;
1548 if (SESS_LEADER(p))
1549 ep->e_flag |= EPROC_SLEADER;
1550 strncpy(ep->e_login, ep->e_sess->s_login, MAXLOGNAME);
1551 }
1552
1553 /*
 * Fill in a kinfo_proc2 structure for the specified process.
1555 */
1556 static void
1557 fill_kproc2(p, ki)
1558 struct proc *p;
1559 struct kinfo_proc2 *ki;
1560 {
1561 struct tty *tp;
1562 struct lwp *l;
1563 memset(ki, 0, sizeof(*ki));
1564
1565 /* XXX NJWLWP
1566 * These are likely not what the caller was looking for.
1567 * The perils of playing with the kernel data structures...
1568 */
1569 ki->p_paddr = PTRTOINT64(p);
1570 ki->p_fd = PTRTOINT64(p->p_fd);
1571 ki->p_cwdi = PTRTOINT64(p->p_cwdi);
1572 ki->p_stats = PTRTOINT64(p->p_stats);
1573 ki->p_limit = PTRTOINT64(p->p_limit);
1574 ki->p_vmspace = PTRTOINT64(p->p_vmspace);
1575 ki->p_sigacts = PTRTOINT64(p->p_sigacts);
1576 ki->p_sess = PTRTOINT64(p->p_session);
1577 ki->p_tsess = 0; /* may be changed if controlling tty below */
1578 ki->p_ru = PTRTOINT64(p->p_ru);
1579
1580 ki->p_eflag = 0;
1581 ki->p_exitsig = p->p_exitsig;
1582 ki->p_flag = p->p_flag;
1583
1584 ki->p_pid = p->p_pid;
1585 if (p->p_pptr)
1586 ki->p_ppid = p->p_pptr->p_pid;
1587 else
1588 ki->p_ppid = 0;
1589 ki->p_sid = p->p_session->s_sid;
1590 ki->p__pgid = p->p_pgrp->pg_id;
1591
1592 ki->p_tpgid = NO_PID; /* may be changed if controlling tty below */
1593
1594 ki->p_uid = p->p_ucred->cr_uid;
1595 ki->p_ruid = p->p_cred->p_ruid;
1596 ki->p_gid = p->p_ucred->cr_gid;
1597 ki->p_rgid = p->p_cred->p_rgid;
1598
1599 memcpy(ki->p_groups, p->p_cred->pc_ucred->cr_groups,
1600 min(sizeof(ki->p_groups), sizeof(p->p_cred->pc_ucred->cr_groups)));
1601 ki->p_ngroups = p->p_cred->pc_ucred->cr_ngroups;
1602
1603 ki->p_jobc = p->p_pgrp->pg_jobc;
1604 if ((p->p_flag & P_CONTROLT) && (tp = p->p_session->s_ttyp)) {
1605 ki->p_tdev = tp->t_dev;
1606 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1607 ki->p_tsess = PTRTOINT64(tp->t_session);
1608 } else {
1609 ki->p_tdev = NODEV;
1610 }
1611
1612 ki->p_estcpu = p->p_estcpu;
1613 ki->p_rtime_sec = p->p_rtime.tv_sec;
1614 ki->p_rtime_usec = p->p_rtime.tv_usec;
1615 ki->p_cpticks = p->p_cpticks;
1616 ki->p_pctcpu = p->p_pctcpu;
1617
1618 ki->p_uticks = p->p_uticks;
1619 ki->p_sticks = p->p_sticks;
1620 ki->p_iticks = p->p_iticks;
1621
1622 ki->p_tracep = PTRTOINT64(p->p_tracep);
1623 ki->p_traceflag = p->p_traceflag;
1624
1625
1626 memcpy(&ki->p_siglist, &p->p_sigctx.ps_siglist, sizeof(ki_sigset_t));
1627 memcpy(&ki->p_sigmask, &p->p_sigctx.ps_sigmask, sizeof(ki_sigset_t));
1628 memcpy(&ki->p_sigignore, &p->p_sigctx.ps_sigignore,sizeof(ki_sigset_t));
1629 memcpy(&ki->p_sigcatch, &p->p_sigctx.ps_sigcatch, sizeof(ki_sigset_t));
1630
1631 ki->p_stat = p->p_stat;
1632 ki->p_nice = p->p_nice;
1633
1634 ki->p_xstat = p->p_xstat;
1635 ki->p_acflag = p->p_acflag;
1636
1637 strncpy(ki->p_comm, p->p_comm,
1638 min(sizeof(ki->p_comm), sizeof(p->p_comm)));
1639
1640 strncpy(ki->p_login, p->p_session->s_login, sizeof(ki->p_login));
1641
1642 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1643 ki->p_vm_rssize = 0;
1644 ki->p_vm_tsize = 0;
1645 ki->p_vm_dsize = 0;
1646 ki->p_vm_ssize = 0;
1647 } else {
1648 struct vmspace *vm = p->p_vmspace;
1649
1650 ki->p_vm_rssize = vm_resident_count(vm);
1651 ki->p_vm_tsize = vm->vm_tsize;
1652 ki->p_vm_dsize = vm->vm_dsize;
1653 ki->p_vm_ssize = vm->vm_ssize;
1654
1655 /* Pick a "representative" LWP */
1656 l = proc_representative_lwp(p);
1657 ki->p_forw = PTRTOINT64(l->l_forw);
1658 ki->p_back = PTRTOINT64(l->l_back);
1659 ki->p_addr = PTRTOINT64(l->l_addr);
1660 ki->p_swtime = l->l_swtime;
1661 ki->p_slptime = l->l_slptime;
1662 if (l->l_stat == LSONPROC) {
1663 KDASSERT(l->l_cpu != NULL);
1664 ki->p_schedflags = l->l_cpu->ci_schedstate.spc_flags;
1665 } else
1666 ki->p_schedflags = 0;
1667 ki->p_holdcnt = l->l_holdcnt;
1668 ki->p_priority = l->l_priority;
1669 ki->p_usrpri = l->l_usrpri;
1670 if (l->l_wmesg)
1671 strncpy(ki->p_wmesg, l->l_wmesg, sizeof(ki->p_wmesg));
1672 ki->p_wchan = PTRTOINT64(l->l_wchan);
1673
1674 }
1675
1676 if (p->p_session->s_ttyvp)
1677 ki->p_eflag |= EPROC_CTTY;
1678 if (SESS_LEADER(p))
1679 ki->p_eflag |= EPROC_SLEADER;
1680
1681 /* XXX Is this double check necessary? */
1682 if (P_ZOMBIE(p)) {
1683 ki->p_uvalid = 0;
1684 } else {
1685 ki->p_uvalid = 1;
1686
1687 ki->p_ustart_sec = p->p_stats->p_start.tv_sec;
1688 ki->p_ustart_usec = p->p_stats->p_start.tv_usec;
1689
1690 ki->p_uutime_sec = p->p_stats->p_ru.ru_utime.tv_sec;
1691 ki->p_uutime_usec = p->p_stats->p_ru.ru_utime.tv_usec;
1692 ki->p_ustime_sec = p->p_stats->p_ru.ru_stime.tv_sec;
1693 ki->p_ustime_usec = p->p_stats->p_ru.ru_stime.tv_usec;
1694
1695 ki->p_uru_maxrss = p->p_stats->p_ru.ru_maxrss;
1696 ki->p_uru_ixrss = p->p_stats->p_ru.ru_ixrss;
1697 ki->p_uru_idrss = p->p_stats->p_ru.ru_idrss;
1698 ki->p_uru_isrss = p->p_stats->p_ru.ru_isrss;
1699 ki->p_uru_minflt = p->p_stats->p_ru.ru_minflt;
1700 ki->p_uru_majflt = p->p_stats->p_ru.ru_majflt;
1701 ki->p_uru_nswap = p->p_stats->p_ru.ru_nswap;
1702 ki->p_uru_inblock = p->p_stats->p_ru.ru_inblock;
1703 ki->p_uru_oublock = p->p_stats->p_ru.ru_oublock;
1704 ki->p_uru_msgsnd = p->p_stats->p_ru.ru_msgsnd;
1705 ki->p_uru_msgrcv = p->p_stats->p_ru.ru_msgrcv;
1706 ki->p_uru_nsignals = p->p_stats->p_ru.ru_nsignals;
1707 ki->p_uru_nvcsw = p->p_stats->p_ru.ru_nvcsw;
1708 ki->p_uru_nivcsw = p->p_stats->p_ru.ru_nivcsw;
1709
1710 ki->p_uctime_sec = p->p_stats->p_cru.ru_utime.tv_sec +
1711 p->p_stats->p_cru.ru_stime.tv_sec;
1712 ki->p_uctime_usec = p->p_stats->p_cru.ru_utime.tv_usec +
1713 p->p_stats->p_cru.ru_stime.tv_usec;
1714 }
1715 #ifdef MULTIPROCESSOR
1716 if (p->p_cpu != NULL)
1717 ki->p_cpuid = p->p_cpu->ci_cpuid;
1718 else
1719 #endif
1720 ki->p_cpuid = KI_NOCPU;
1721
1722 }
1723
1724
1725 /*
1726 * Pick a LWP to represent the process for those operations which
1727 * want information about a "process" that is actually associated
1728 * with a LWP.
1729 */
1730 static struct lwp *proc_representative_lwp(p)
1731 struct proc *p;
1732 {
1733 struct lwp *l = NULL;
1734
1735 /* Trivial case: only one LWP */
1736 if (p->p_nrlwps == 1)
1737 return (LIST_FIRST(&p->p_lwps));
1738
1739 switch (p->p_stat) {
1740 case SSTOP:
1741 /* Pick the first stopped LWP */
1742 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1743 if (l->l_stat == LSSTOP)
1744 return (l);
1745 }
1746 /* NOTREACHED */
1747 break;
1748 case SACTIVE:
1749 /* Pick the first live LWP */
1750 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1751 if (l->l_stat == LSRUN ||
1752 l->l_stat == LSSLEEP ||
1753 l->l_stat == LSONPROC)
1754 return (l);
1755 }
1756 break;
1757 case SDEAD:
1758 case SZOMB:
1759 /* Doesn't really matter... */
1760 l = LIST_FIRST(&p->p_lwps);
1761 break;
1762 #ifdef DIAGNOSTIC
1763 case SIDL:
1764 /* We have more than one LWP and we're in SIDL?
1765 * How'd that happen?
1766 */
1767 panic("Too many LWPs (%d) in SIDL process %d (%s)",
1768 p->p_nrlwps, p->p_pid, p->p_comm);
1769 default:
1770 panic("Process %d (%s) in unknown state %d",
1771 p->p_pid, p->p_comm, p->p_stat);
1772 #endif
1773 }
1774
1775 panic("proc_representative_lwp: couldn't find a lwp for process"
1776 " %d (%s)", p->p_pid, p->p_comm);
1777 /* NOTREACHED */
1778 return NULL;
1779 }
1780
1781
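/*
 * KERN_PROC_ARGS: copy out the argument or environment strings (or
 * their counts) of another process.  The ps_strings structure and the
 * vectors themselves live in the target's user address space, so they
 * are read through uvm_io() a page at a time.
 */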
1782 int
1783 sysctl_procargs(name, namelen, where, sizep, up)
1784 int *name;
1785 u_int namelen;
1786 void *where;
1787 size_t *sizep;
1788 struct proc *up;
1789 {
1790 struct ps_strings pss;
1791 struct proc *p;
1792 size_t len, upper_bound, xlen;
1793 struct uio auio;
1794 struct iovec aiov;
1795 vaddr_t argv;
1796 pid_t pid;
1797 int nargv, type, error, i;
1798 char *arg;
1799 char *tmp;
1800
1801 if (namelen != 2)
1802 return (EINVAL);
1803 pid = name[0];
1804 type = name[1];
1805
1806 switch (type) {
1807 case KERN_PROC_ARGV:
1808 case KERN_PROC_NARGV:
1809 case KERN_PROC_ENV:
1810 case KERN_PROC_NENV:
1811 /* ok */
1812 break;
1813 default:
1814 return (EINVAL);
1815 }
1816
1817 /* check pid */
1818 if ((p = pfind(pid)) == NULL)
1819 return (EINVAL);
1820
	/* only root or the same user may look at the environment */
1822 if (type == KERN_PROC_ENV || type == KERN_PROC_NENV) {
1823 if (up->p_ucred->cr_uid != 0) {
1824 if (up->p_cred->p_ruid != p->p_cred->p_ruid ||
1825 up->p_cred->p_ruid != p->p_cred->p_svuid)
1826 return (EPERM);
1827 }
1828 }
1829
1830 if (sizep != NULL && where == NULL) {
1831 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV)
1832 *sizep = sizeof (int);
1833 else
1834 *sizep = ARG_MAX; /* XXX XXX XXX */
1835 return (0);
1836 }
1837 if (where == NULL || sizep == NULL)
1838 return (EINVAL);
1839
1840 /*
1841 * Zombies don't have a stack, so we can't read their psstrings.
1842 * System processes also don't have a user stack.
1843 */
1844 if (P_ZOMBIE(p) || (p->p_flag & P_SYSTEM) != 0)
1845 return (EINVAL);
1846
1847 /*
1848 * Lock the process down in memory.
1849 */
1850 /* XXXCDC: how should locking work here? */
1851 if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
1852 return (EFAULT);
1853
1854 p->p_vmspace->vm_refcnt++; /* XXX */
1855
1856 /*
1857 * Allocate a temporary buffer to hold the arguments.
1858 */
1859 arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
1860
1861 /*
1862 * Read in the ps_strings structure.
1863 */
1864 aiov.iov_base = &pss;
1865 aiov.iov_len = sizeof(pss);
1866 auio.uio_iov = &aiov;
1867 auio.uio_iovcnt = 1;
1868 auio.uio_offset = (vaddr_t)p->p_psstr;
1869 auio.uio_resid = sizeof(pss);
1870 auio.uio_segflg = UIO_SYSSPACE;
1871 auio.uio_rw = UIO_READ;
1872 auio.uio_procp = NULL;
1873 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1874 if (error)
1875 goto done;
1876
1877 if (type == KERN_PROC_ARGV || type == KERN_PROC_NARGV)
1878 memcpy(&nargv, (char *)&pss + p->p_psnargv, sizeof(nargv));
1879 else
1880 memcpy(&nargv, (char *)&pss + p->p_psnenv, sizeof(nargv));
1881 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV) {
1882 error = copyout(&nargv, where, sizeof(nargv));
1883 *sizep = sizeof(nargv);
1884 goto done;
1885 }
1886 /*
1887 * Now read the address of the argument vector.
1888 */
1889 switch (type) {
1890 case KERN_PROC_ARGV:
1891 /* XXX compat32 stuff here */
1892 memcpy(&tmp, (char *)&pss + p->p_psargv, sizeof(tmp));
1893 break;
1894 case KERN_PROC_ENV:
1895 memcpy(&tmp, (char *)&pss + p->p_psenv, sizeof(tmp));
1896 break;
	default:
		/* Cannot happen: type was validated above. */
		error = EINVAL;
		goto done;
1899 }
1900 auio.uio_offset = (off_t)(long)tmp;
1901 aiov.iov_base = &argv;
1902 aiov.iov_len = sizeof(argv);
1903 auio.uio_iov = &aiov;
1904 auio.uio_iovcnt = 1;
1905 auio.uio_resid = sizeof(argv);
1906 auio.uio_segflg = UIO_SYSSPACE;
1907 auio.uio_rw = UIO_READ;
1908 auio.uio_procp = NULL;
1909 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1910 if (error)
1911 goto done;
1912
1913 /*
1914 * Now copy in the actual argument vector, one page at a time,
1915 * since we don't know how long the vector is (though, we do
1916 * know how many NUL-terminated strings are in the vector).
1917 */
1918 len = 0;
1919 upper_bound = *sizep;
1920 for (; nargv != 0 && len < upper_bound; len += xlen) {
1921 aiov.iov_base = arg;
1922 aiov.iov_len = PAGE_SIZE;
1923 auio.uio_iov = &aiov;
1924 auio.uio_iovcnt = 1;
1925 auio.uio_offset = argv + len;
1926 xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
1927 auio.uio_resid = xlen;
1928 auio.uio_segflg = UIO_SYSSPACE;
1929 auio.uio_rw = UIO_READ;
1930 auio.uio_procp = NULL;
1931 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1932 if (error)
1933 goto done;
1934
1935 for (i = 0; i < xlen && nargv != 0; i++) {
1936 if (arg[i] == '\0')
1937 nargv--; /* one full string */
1938 }
1939
1940 /* make sure we don't copyout past the end of the user's buffer */
1941 if (len + i > upper_bound)
1942 i = upper_bound - len;
1943
1944 error = copyout(arg, (char *)where + len, i);
1945 if (error)
1946 break;
1947
1948 if (nargv == 0) {
1949 len += i;
1950 break;
1951 }
1952 }
1953 *sizep = len;
1954
1955 done:
1956 uvmspace_free(p->p_vmspace);
1957
1958 free(arg, M_TEMP);
1959 return (error);
1960 }
1961
1962 #if NPTY > 0
1963 int pty_maxptys __P((int, int)); /* defined in kern/tty_pty.c */
1964
1965 /*
1966 * Validate parameters and get old / set new parameters
1967 * for pty sysctl function.
1968 */
1969 static int
1970 sysctl_pty(oldp, oldlenp, newp, newlen)
1971 void *oldp;
1972 size_t *oldlenp;
1973 void *newp;
1974 size_t newlen;
1975 {
1976 int error = 0;
1977 int oldmax = 0, newmax = 0;
1978
1979 /* get current value of maxptys */
1980 oldmax = pty_maxptys(0, 0);
1981
1982 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &oldmax, int)
1983
1984 if (!error && newp) {
1985 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
1986 SYSCTL_SCALAR_NEWPCOP_TYP(newp, &newmax, int)
1987
1988 if (newmax != pty_maxptys(newmax, (newp != NULL)))
1989 return (EINVAL);
1990
1991 }
1992
1993 return (error);
1994 }
1995 #endif /* NPTY > 0 */
1996