/*	$NetBSD: kern_sysctl.c,v 1.86.2.8 2001/09/21 22:36:25 nathanw Exp $	*/
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Mike Karels at Berkeley Software Design, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95
39 */
40
41 /*
42 * sysctl system call.
43 */
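
/*
 * Illustrative userland sketch (not part of this file): reading an
 * integer node such as kern.maxproc through sysctl(3) looks roughly
 * like this; sys___sysctl() below is the kernel end of that call.
 *
 *	int mib[2] = { CTL_KERN, KERN_MAXPROC };
 *	int maxproc;
 *	size_t len = sizeof(maxproc);
 *
 *	if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */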
44
45 #include "opt_ddb.h"
46 #include "opt_insecure.h"
47 #include "opt_defcorename.h"
48 #include "opt_new_pipe.h"
49 #include "opt_sysv.h"
50 #include "pty.h"
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/buf.h>
56 #include <sys/device.h>
57 #include <sys/disklabel.h>
58 #include <sys/dkstat.h>
59 #include <sys/exec.h>
60 #include <sys/file.h>
61 #include <sys/ioctl.h>
62 #include <sys/malloc.h>
63 #include <sys/mount.h>
64 #include <sys/msgbuf.h>
65 #include <sys/pool.h>
66 #include <sys/lwp.h>
67 #include <sys/proc.h>
68 #include <sys/resource.h>
69 #include <sys/resourcevar.h>
70 #include <sys/syscallargs.h>
71 #include <sys/tty.h>
72 #include <sys/unistd.h>
73 #include <sys/vnode.h>
74 #include <sys/socketvar.h>
75 #define __SYSCTL_PRIVATE
76 #include <sys/sysctl.h>
77 #include <sys/lock.h>
78
79 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
80 #include <sys/ipc.h>
81 #endif
82 #ifdef SYSVMSG
83 #include <sys/msg.h>
84 #endif
85 #ifdef SYSVSEM
86 #include <sys/sem.h>
87 #endif
88 #ifdef SYSVSHM
89 #include <sys/shm.h>
90 #endif
91
92 #include <dev/cons.h>
93
94 #if defined(DDB)
95 #include <ddb/ddbvar.h>
96 #endif
97
98 #ifdef NEW_PIPE
99 #include <sys/pipe.h>
100 #endif
101
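/*
 * Kernel pointers are exported (e.g. in struct kinfo_proc2) as 64-bit
 * integers so that the exported structure layout does not depend on
 * the kernel's pointer size.
 */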
102 #define PTRTOINT64(foo) ((u_int64_t)(uintptr_t)(foo))
103
104 static int sysctl_file(void *, size_t *);
105 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
106 static int sysctl_sysvipc(int *, u_int, void *, size_t *);
107 #endif
108 static int sysctl_msgbuf(void *, size_t *);
109 static int sysctl_doeproc(int *, u_int, void *, size_t *);
110 #ifdef MULTIPROCESSOR
111 static int sysctl_docptime(void *, size_t *, void *);
112 static int sysctl_ncpus(void);
113 #endif
114 static void fill_kproc2(struct proc *, struct kinfo_proc2 *);
115 static int sysctl_procargs(int *, u_int, void *, size_t *, struct proc *);
116 #if NPTY > 0
117 static int sysctl_pty(void *, size_t *, void *, size_t);
118 #endif
119
120 static struct lwp *proc_representative_lwp(struct proc *);
121
/*
 * The `sysctl_memlock' is intended to keep too many processes from
 * locking down memory by doing sysctls at once.  Whether or not this
 * is really worth worrying about is probably a subject of some
 * debate.
 */
128 struct lock sysctl_memlock;
129
130 void
131 sysctl_init(void)
132 {
133
134 lockinit(&sysctl_memlock, PRIBIO|PCATCH, "sysctl", 0, 0);
135 }
136
137 int
138 sys___sysctl(struct lwp *l, void *v, register_t *retval)
139 {
140 struct sys___sysctl_args /* {
141 syscallarg(int *) name;
142 syscallarg(u_int) namelen;
143 syscallarg(void *) old;
144 syscallarg(size_t *) oldlenp;
145 syscallarg(void *) new;
146 syscallarg(size_t) newlen;
147 } */ *uap = v;
148 struct proc *p = l->l_proc;
149 int error;
150 size_t savelen = 0, oldlen = 0;
151 sysctlfn *fn;
152 int name[CTL_MAXNAME];
153 size_t *oldlenp;
154
155 /*
156 * all top-level sysctl names are non-terminal
157 */
158 if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
159 return (EINVAL);
160 error = copyin(SCARG(uap, name), &name,
161 SCARG(uap, namelen) * sizeof(int));
162 if (error)
163 return (error);
164
	/*
	 * For all but CTL_PROC, must be root to change a value.
	 * For CTL_PROC, must be root, or owner of the proc (and not suid);
	 * this is checked in proc_sysctl() (once we know the target proc).
	 */
170 if (SCARG(uap, new) != NULL && name[0] != CTL_PROC &&
171 (error = suser(p->p_ucred, &p->p_acflag)))
172 return error;
173
174 switch (name[0]) {
175 case CTL_KERN:
176 fn = kern_sysctl;
177 break;
178 case CTL_HW:
179 fn = hw_sysctl;
180 break;
181 case CTL_VM:
182 fn = uvm_sysctl;
183 break;
184 case CTL_NET:
185 fn = net_sysctl;
186 break;
187 case CTL_VFS:
188 fn = vfs_sysctl;
189 break;
190 case CTL_MACHDEP:
191 fn = cpu_sysctl;
192 break;
193 #ifdef DEBUG
194 case CTL_DEBUG:
195 fn = debug_sysctl;
196 break;
197 #endif
198 #ifdef DDB
199 case CTL_DDB:
200 fn = ddb_sysctl;
201 break;
202 #endif
203 case CTL_PROC:
204 fn = proc_sysctl;
205 break;
206 default:
207 return (EOPNOTSUPP);
208 }
209
210 /*
211 * XXX Hey, we wire `old', but what about `new'?
212 */
213
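	/*
	 * The old buffer is wired while the handler runs: take
	 * sysctl_memlock (to bound the total amount of wired memory),
	 * wire the user's buffer, call the per-hierarchy handler, then
	 * unwire and copy the resulting length back out.
	 */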
214 oldlenp = SCARG(uap, oldlenp);
215 if (oldlenp) {
216 if ((error = copyin(oldlenp, &oldlen, sizeof(oldlen))))
217 return (error);
218 oldlenp = &oldlen;
219 }
220 if (SCARG(uap, old) != NULL) {
221 error = lockmgr(&sysctl_memlock, LK_EXCLUSIVE, NULL);
222 if (error)
223 return (error);
224 error = uvm_vslock(p, SCARG(uap, old), oldlen,
225 VM_PROT_READ|VM_PROT_WRITE);
226 if (error) {
227 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
228 return error;
229 }
230 savelen = oldlen;
231 }
232 error = (*fn)(name + 1, SCARG(uap, namelen) - 1, SCARG(uap, old),
233 oldlenp, SCARG(uap, new), SCARG(uap, newlen), p);
234 if (SCARG(uap, old) != NULL) {
235 uvm_vsunlock(p, SCARG(uap, old), savelen);
236 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
237 }
238 if (error)
239 return (error);
240 if (SCARG(uap, oldlenp))
241 error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
242 return (error);
243 }
244
245 /*
246 * Attributes stored in the kernel.
247 */
248 char hostname[MAXHOSTNAMELEN];
249 int hostnamelen;
250
251 char domainname[MAXHOSTNAMELEN];
252 int domainnamelen;
253
254 long hostid;
255
256 #ifdef INSECURE
257 int securelevel = -1;
258 #else
259 int securelevel = 0;
260 #endif
261
262 #ifndef DEFCORENAME
263 #define DEFCORENAME "%n.core"
264 #endif
265 char defcorename[MAXPATHLEN] = DEFCORENAME;
266 int defcorenamelen = sizeof(DEFCORENAME);
267
268 extern int kern_logsigexit;
269 extern fixpt_t ccpu;
270
271 #ifndef MULTIPROCESSOR
272 #define sysctl_ncpus() 1
273 #endif
274
275 #ifdef MULTIPROCESSOR
276
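/*
 * Fallback for ports that do not yet provide CPU_INFO_FOREACH:
 * iterate over just the current CPU.
 */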
277 #ifndef CPU_INFO_FOREACH
278 #define CPU_INFO_ITERATOR int
279 #define CPU_INFO_FOREACH(cii, ci) cii = 0, ci = curcpu(); ci != NULL; ci = NULL
280 #endif
281
282 static int
283 sysctl_docptime(void *oldp, size_t *oldlenp, void *newp)
284 {
285 u_int64_t cp_time[CPUSTATES];
286 int i;
287 struct cpu_info *ci;
288 CPU_INFO_ITERATOR cii;
289
290 for (i=0; i<CPUSTATES; i++)
291 cp_time[i] = 0;
292
293 for (CPU_INFO_FOREACH(cii, ci)) {
294 for (i=0; i<CPUSTATES; i++)
295 cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
296 }
297 return (sysctl_rdstruct(oldp, oldlenp, newp,
298 cp_time, sizeof(cp_time)));
299 }
300
301 static int
302 sysctl_ncpus(void)
303 {
304 struct cpu_info *ci;
305 CPU_INFO_ITERATOR cii;
306
307 int ncpus = 0;
308 for (CPU_INFO_FOREACH(cii, ci))
309 ncpus++;
310 return ncpus;
311 }
312
313 #endif
314
315 /*
316 * kernel related system variables.
317 */
318 int
319 kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
320 void *newp, size_t newlen, struct proc *p)
321 {
322 int error, level, inthostid;
323 int old_autonicetime;
324 int old_vnodes;
325 dev_t consdev;
326
327 /* All sysctl names at this level, except for a few, are terminal. */
328 switch (name[0]) {
329 case KERN_PROC:
330 case KERN_PROC2:
331 case KERN_PROF:
332 case KERN_MBUF:
333 case KERN_PROC_ARGS:
334 case KERN_SYSVIPC_INFO:
335 case KERN_PIPE:
336 /* Not terminal. */
337 break;
338 default:
339 if (namelen != 1)
340 return (ENOTDIR); /* overloaded */
341 }
342
343 switch (name[0]) {
344 case KERN_OSTYPE:
345 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
346 case KERN_OSRELEASE:
347 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
348 case KERN_OSREV:
349 return (sysctl_rdint(oldp, oldlenp, newp, __NetBSD_Version__));
350 case KERN_VERSION:
351 return (sysctl_rdstring(oldp, oldlenp, newp, version));
352 case KERN_MAXVNODES:
353 old_vnodes = desiredvnodes;
354 error = sysctl_int(oldp, oldlenp, newp, newlen, &desiredvnodes);
355 if (old_vnodes > desiredvnodes) {
356 desiredvnodes = old_vnodes;
357 return (EINVAL);
358 }
359 if (error == 0) {
360 vfs_reinit();
361 }
362 return (error);
363 case KERN_MAXPROC:
364 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc));
365 case KERN_MAXFILES:
366 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
367 case KERN_ARGMAX:
368 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
369 case KERN_SECURELVL:
370 level = securelevel;
371 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
372 newp == NULL)
373 return (error);
374 if (level < securelevel && p->p_pid != 1)
375 return (EPERM);
376 securelevel = level;
377 return (0);
378 case KERN_HOSTNAME:
379 error = sysctl_string(oldp, oldlenp, newp, newlen,
380 hostname, sizeof(hostname));
381 if (newp && !error)
382 hostnamelen = newlen;
383 return (error);
384 case KERN_DOMAINNAME:
385 error = sysctl_string(oldp, oldlenp, newp, newlen,
386 domainname, sizeof(domainname));
387 if (newp && !error)
388 domainnamelen = newlen;
389 return (error);
390 case KERN_HOSTID:
391 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
392 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
393 hostid = inthostid;
394 return (error);
395 case KERN_CLOCKRATE:
396 return (sysctl_clockrate(oldp, oldlenp));
397 case KERN_BOOTTIME:
398 return (sysctl_rdstruct(oldp, oldlenp, newp, &boottime,
399 sizeof(struct timeval)));
400 case KERN_VNODE:
401 return (sysctl_vnode(oldp, oldlenp, p));
402 case KERN_PROC:
403 case KERN_PROC2:
404 return (sysctl_doeproc(name, namelen, oldp, oldlenp));
405 case KERN_PROC_ARGS:
406 return (sysctl_procargs(name + 1, namelen - 1,
407 oldp, oldlenp, p));
408 case KERN_FILE:
409 return (sysctl_file(oldp, oldlenp));
410 #ifdef GPROF
411 case KERN_PROF:
412 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
413 newp, newlen));
414 #endif
415 case KERN_POSIX1:
416 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
417 case KERN_NGROUPS:
418 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
419 case KERN_JOB_CONTROL:
420 return (sysctl_rdint(oldp, oldlenp, newp, 1));
421 case KERN_SAVED_IDS:
422 #ifdef _POSIX_SAVED_IDS
423 return (sysctl_rdint(oldp, oldlenp, newp, 1));
424 #else
425 return (sysctl_rdint(oldp, oldlenp, newp, 0));
426 #endif
427 case KERN_MAXPARTITIONS:
428 return (sysctl_rdint(oldp, oldlenp, newp, MAXPARTITIONS));
429 case KERN_RAWPARTITION:
430 return (sysctl_rdint(oldp, oldlenp, newp, RAW_PART));
431 #ifdef NTP
432 case KERN_NTPTIME:
433 return (sysctl_ntptime(oldp, oldlenp));
434 #endif
435 case KERN_AUTONICETIME:
436 old_autonicetime = autonicetime;
437 error = sysctl_int(oldp, oldlenp, newp, newlen, &autonicetime);
438 if (autonicetime < 0)
439 autonicetime = old_autonicetime;
440 return (error);
441 case KERN_AUTONICEVAL:
442 error = sysctl_int(oldp, oldlenp, newp, newlen, &autoniceval);
443 if (autoniceval < PRIO_MIN)
444 autoniceval = PRIO_MIN;
445 if (autoniceval > PRIO_MAX)
446 autoniceval = PRIO_MAX;
447 return (error);
448 case KERN_RTC_OFFSET:
449 return (sysctl_rdint(oldp, oldlenp, newp, rtc_offset));
450 case KERN_ROOT_DEVICE:
451 return (sysctl_rdstring(oldp, oldlenp, newp,
452 root_device->dv_xname));
453 case KERN_MSGBUFSIZE:
454 /*
455 * deal with cases where the message buffer has
456 * become corrupted.
457 */
458 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
459 msgbufenabled = 0;
460 return (ENXIO);
461 }
462 return (sysctl_rdint(oldp, oldlenp, newp, msgbufp->msg_bufs));
463 case KERN_FSYNC:
464 return (sysctl_rdint(oldp, oldlenp, newp, 1));
465 case KERN_SYSVMSG:
466 #ifdef SYSVMSG
467 return (sysctl_rdint(oldp, oldlenp, newp, 1));
468 #else
469 return (sysctl_rdint(oldp, oldlenp, newp, 0));
470 #endif
471 case KERN_SYSVSEM:
472 #ifdef SYSVSEM
473 return (sysctl_rdint(oldp, oldlenp, newp, 1));
474 #else
475 return (sysctl_rdint(oldp, oldlenp, newp, 0));
476 #endif
477 case KERN_SYSVSHM:
478 #ifdef SYSVSHM
479 return (sysctl_rdint(oldp, oldlenp, newp, 1));
480 #else
481 return (sysctl_rdint(oldp, oldlenp, newp, 0));
482 #endif
483 case KERN_DEFCORENAME:
484 if (newp && newlen < 1)
485 return (EINVAL);
486 error = sysctl_string(oldp, oldlenp, newp, newlen,
487 defcorename, sizeof(defcorename));
488 if (newp && !error)
489 defcorenamelen = newlen;
490 return (error);
491 case KERN_SYNCHRONIZED_IO:
492 return (sysctl_rdint(oldp, oldlenp, newp, 1));
493 case KERN_IOV_MAX:
494 return (sysctl_rdint(oldp, oldlenp, newp, IOV_MAX));
495 case KERN_MBUF:
496 return (sysctl_dombuf(name + 1, namelen - 1, oldp, oldlenp,
497 newp, newlen));
498 case KERN_MAPPED_FILES:
499 return (sysctl_rdint(oldp, oldlenp, newp, 1));
500 case KERN_MEMLOCK:
501 return (sysctl_rdint(oldp, oldlenp, newp, 1));
502 case KERN_MEMLOCK_RANGE:
503 return (sysctl_rdint(oldp, oldlenp, newp, 1));
504 case KERN_MEMORY_PROTECTION:
505 return (sysctl_rdint(oldp, oldlenp, newp, 1));
506 case KERN_LOGIN_NAME_MAX:
507 return (sysctl_rdint(oldp, oldlenp, newp, LOGIN_NAME_MAX));
508 case KERN_LOGSIGEXIT:
509 return (sysctl_int(oldp, oldlenp, newp, newlen,
510 &kern_logsigexit));
511 case KERN_FSCALE:
512 return (sysctl_rdint(oldp, oldlenp, newp, FSCALE));
513 case KERN_CCPU:
514 return (sysctl_rdint(oldp, oldlenp, newp, ccpu));
515 case KERN_CP_TIME:
516 #ifndef MULTIPROCESSOR
517 return (sysctl_rdstruct(oldp, oldlenp, newp,
518 curcpu()->ci_schedstate.spc_cp_time,
519 sizeof(curcpu()->ci_schedstate.spc_cp_time)));
520 #else
521 return (sysctl_docptime(oldp, oldlenp, newp));
522 #endif
523 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
524 case KERN_SYSVIPC_INFO:
525 return (sysctl_sysvipc(name + 1, namelen - 1, oldp, oldlenp));
526 #endif
527 case KERN_MSGBUF:
528 return (sysctl_msgbuf(oldp, oldlenp));
529 case KERN_CONSDEV:
530 if (cn_tab != NULL)
531 consdev = cn_tab->cn_dev;
532 else
533 consdev = NODEV;
534 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
535 sizeof consdev));
536 #if NPTY > 0
537 case KERN_MAXPTYS:
538 return sysctl_pty(oldp, oldlenp, newp, newlen);
539 #endif
540 #ifdef NEW_PIPE
541 case KERN_PIPE:
542 return (sysctl_dopipe(name + 1, namelen - 1, oldp, oldlenp,
543 newp, newlen));
544 #endif
545 case KERN_MAXPHYS:
546 return (sysctl_rdint(oldp, oldlenp, newp, MAXPHYS));
547 case KERN_SBMAX:
548 {
549 int new_sbmax = sb_max;
550
551 error = sysctl_int(oldp, oldlenp, newp, newlen, &new_sbmax);
552 if (error == 0) {
553 if (new_sbmax < (16 * 1024)) /* sanity */
554 return (EINVAL);
555 sb_max = new_sbmax;
556 }
557 return (error);
558 }
559 default:
560 return (EOPNOTSUPP);
561 }
562 /* NOTREACHED */
563 }
564
565 /*
566 * hardware related system variables.
567 */
568 int
569 hw_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
570 void *newp, size_t newlen, struct proc *p)
571 {
572
573 /* all sysctl names at this level are terminal */
574 if (namelen != 1)
575 return (ENOTDIR); /* overloaded */
576
577 switch (name[0]) {
578 case HW_MACHINE:
579 return (sysctl_rdstring(oldp, oldlenp, newp, machine));
580 case HW_MACHINE_ARCH:
581 return (sysctl_rdstring(oldp, oldlenp, newp, machine_arch));
582 case HW_MODEL:
583 return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
584 case HW_NCPU:
585 return (sysctl_rdint(oldp, oldlenp, newp, sysctl_ncpus()));
586 case HW_BYTEORDER:
587 return (sysctl_rdint(oldp, oldlenp, newp, BYTE_ORDER));
588 case HW_PHYSMEM:
589 return (sysctl_rdint(oldp, oldlenp, newp, ctob(physmem)));
590 case HW_USERMEM:
591 return (sysctl_rdint(oldp, oldlenp, newp,
592 ctob(physmem - uvmexp.wired)));
593 case HW_PAGESIZE:
594 return (sysctl_rdint(oldp, oldlenp, newp, PAGE_SIZE));
595 case HW_ALIGNBYTES:
596 return (sysctl_rdint(oldp, oldlenp, newp, ALIGNBYTES));
597 case HW_CNMAGIC: {
598 char magic[CNS_LEN];
599 int error;
600
601 if (oldp)
602 cn_get_magic(magic, CNS_LEN);
603 error = sysctl_string(oldp, oldlenp, newp, newlen,
604 magic, sizeof(magic));
605 if (newp && !error) {
606 error = cn_set_magic(magic);
607 }
608 return (error);
609 }
610 default:
611 return (EOPNOTSUPP);
612 }
613 /* NOTREACHED */
614 }
615
616 #ifdef DEBUG
617 /*
618 * Debugging related system variables.
619 */
620 struct ctldebug debug0, debug1, debug2, debug3, debug4;
621 struct ctldebug debug5, debug6, debug7, debug8, debug9;
622 struct ctldebug debug10, debug11, debug12, debug13, debug14;
623 struct ctldebug debug15, debug16, debug17, debug18, debug19;
624 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
625 &debug0, &debug1, &debug2, &debug3, &debug4,
626 &debug5, &debug6, &debug7, &debug8, &debug9,
627 &debug10, &debug11, &debug12, &debug13, &debug14,
628 &debug15, &debug16, &debug17, &debug18, &debug19,
629 };
630
631 int
632 debug_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
633 void *newp, size_t newlen, struct proc *p)
634 {
635 struct ctldebug *cdp;
636
637 /* all sysctl names at this level are name and field */
638 if (namelen != 2)
639 return (ENOTDIR); /* overloaded */
	/* Bounds-check the index before using it to look up the slot. */
	if (name[0] < 0 || name[0] >= CTL_DEBUG_MAXID)
		return (EOPNOTSUPP);
	cdp = debugvars[name[0]];
	if (cdp->debugname == 0)
		return (EOPNOTSUPP);
643 switch (name[1]) {
644 case CTL_DEBUG_NAME:
645 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
646 case CTL_DEBUG_VALUE:
647 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
648 default:
649 return (EOPNOTSUPP);
650 }
651 /* NOTREACHED */
652 }
653 #endif /* DEBUG */
654
655 int
656 proc_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
657 void *newp, size_t newlen, struct proc *p)
658 {
659 struct proc *ptmp = NULL;
660 const struct proclist_desc *pd;
661 int error = 0;
662 struct rlimit alim;
663 struct plimit *newplim;
664 char *tmps = NULL;
665 int i, curlen, len;
666
667 if (namelen < 2)
668 return EINVAL;
669
670 if (name[0] == PROC_CURPROC) {
671 ptmp = p;
672 } else {
673 proclist_lock_read();
674 for (pd = proclists; pd->pd_list != NULL; pd++) {
675 for (ptmp = LIST_FIRST(pd->pd_list); ptmp != NULL;
676 ptmp = LIST_NEXT(ptmp, p_list)) {
677 /* Skip embryonic processes. */
678 if (ptmp->p_stat == SIDL)
679 continue;
680 if (ptmp->p_pid == (pid_t)name[0])
681 break;
682 }
683 if (ptmp != NULL)
684 break;
685 }
686 proclist_unlock_read();
687 if (ptmp == NULL)
688 return(ESRCH);
689 if (p->p_ucred->cr_uid != 0) {
690 if(p->p_cred->p_ruid != ptmp->p_cred->p_ruid ||
691 p->p_cred->p_ruid != ptmp->p_cred->p_svuid)
692 return EPERM;
693 if (ptmp->p_cred->p_rgid != ptmp->p_cred->p_svgid)
694 return EPERM; /* sgid proc */
695 for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
696 if (p->p_ucred->cr_groups[i] ==
697 ptmp->p_cred->p_rgid)
698 break;
699 }
700 if (i == p->p_ucred->cr_ngroups)
701 return EPERM;
702 }
703 }
704 if (name[1] == PROC_PID_CORENAME) {
705 if (namelen != 2)
706 return EINVAL;
707 /*
708 * Can't use sysctl_string() here because we may malloc a new
709 * area during the process, so we have to do it by hand.
710 */
711 curlen = strlen(ptmp->p_limit->pl_corename) + 1;
712 if (oldlenp && *oldlenp < curlen) {
713 if (!oldp)
714 *oldlenp = curlen;
715 return (ENOMEM);
716 }
717 if (newp) {
718 if (securelevel > 2)
719 return EPERM;
720 if (newlen > MAXPATHLEN)
721 return ENAMETOOLONG;
722 tmps = malloc(newlen + 1, M_TEMP, M_WAITOK);
723 if (tmps == NULL)
724 return ENOMEM;
725 error = copyin(newp, tmps, newlen + 1);
726 tmps[newlen] = '\0';
727 if (error)
728 goto cleanup;
			/* Enforce the name to be "core", or to end in "/core" or ".core". */
730 if (newlen < 4) { /* c.o.r.e */
731 error = EINVAL;
732 goto cleanup;
733 }
734 len = newlen - 4;
735 if (len > 0) {
736 if (tmps[len - 1] != '.' &&
737 tmps[len - 1] != '/') {
738 error = EINVAL;
739 goto cleanup;
740 }
741 }
742 if (strcmp(&tmps[len], "core") != 0) {
743 error = EINVAL;
744 goto cleanup;
745 }
746 }
747 if (oldp && oldlenp) {
748 *oldlenp = curlen;
749 error = copyout(ptmp->p_limit->pl_corename, oldp,
750 curlen);
751 }
752 if (newp && error == 0) {
753 /* if the 2 strings are identical, don't limcopy() */
754 if (strcmp(tmps, ptmp->p_limit->pl_corename) == 0) {
755 error = 0;
756 goto cleanup;
757 }
758 if (ptmp->p_limit->p_refcnt > 1 &&
759 (ptmp->p_limit->p_lflags & PL_SHAREMOD) == 0) {
760 newplim = limcopy(ptmp->p_limit);
761 limfree(ptmp->p_limit);
762 ptmp->p_limit = newplim;
763 } else if (ptmp->p_limit->pl_corename != defcorename) {
764 free(ptmp->p_limit->pl_corename, M_TEMP);
765 }
766 ptmp->p_limit->pl_corename = tmps;
767 return (0);
768 }
769 cleanup:
770 if (tmps)
771 free(tmps, M_TEMP);
772 return (error);
773 }
774 if (name[1] == PROC_PID_LIMIT) {
		if (namelen != 4 || name[2] < 1 ||
		    name[2] >= PROC_PID_LIMIT_MAXID)
776 return EINVAL;
777 memcpy(&alim, &ptmp->p_rlimit[name[2] - 1], sizeof(alim));
778 if (name[3] == PROC_PID_LIMIT_TYPE_HARD)
779 error = sysctl_quad(oldp, oldlenp, newp, newlen,
780 &alim.rlim_max);
781 else if (name[3] == PROC_PID_LIMIT_TYPE_SOFT)
782 error = sysctl_quad(oldp, oldlenp, newp, newlen,
783 &alim.rlim_cur);
784 else
785 error = EINVAL;
786
787 if (error)
788 return error;
789
790 if (newp)
791 error = dosetrlimit(ptmp, p->p_cred,
792 name[2] - 1, &alim);
793 return error;
794 }
795 return (EINVAL);
796 }
797
798 /*
799 * Convenience macros.
800 */
801
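/*
 * The SYSCTL_SCALAR_* macros factor out the three steps shared by the
 * fixed-size helpers below: reject a new value of the wrong length
 * (NEWPCHECK), copy the current value out to the caller (CORE), and
 * copy a new value in over the kernel variable (NEWPCOP); sysctl_int()
 * is the canonical composition of all three.
 */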
802 #define SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, len) \
803 if (oldlenp) { \
804 if (!oldp) \
805 *oldlenp = len; \
806 else { \
807 if (*oldlenp < len) \
808 return(ENOMEM); \
809 *oldlenp = len; \
810 error = copyout((caddr_t)valp, oldp, len); \
811 } \
812 }
813
814 #define SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, typ) \
815 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, sizeof(typ))
816
817 #define SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len) \
818 if (newp && newlen != len) \
819 return (EINVAL);
820
821 #define SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, typ) \
822 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, sizeof(typ))
823
824 #define SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, len) \
825 if (error == 0 && newp) \
826 error = copyin(newp, valp, len);
827
828 #define SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, typ) \
829 SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, sizeof(typ))
830
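/*
 * SYSCTL_STRING_CORE copies a NUL-terminated string out to the caller;
 * if the caller's buffer is too small, a truncated copy is made and
 * err2 arranges for ENOMEM to be reported once the copyout succeeds.
 */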
831 #define SYSCTL_STRING_CORE(oldp, oldlenp, str) \
832 if (oldlenp) { \
833 len = strlen(str) + 1; \
834 if (!oldp) \
835 *oldlenp = len; \
836 else { \
837 if (*oldlenp < len) { \
838 err2 = ENOMEM; \
839 len = *oldlenp; \
840 } else \
841 *oldlenp = len; \
842 error = copyout(str, oldp, len);\
843 if (error == 0) \
844 error = err2; \
845 } \
846 }
847
848 /*
849 * Validate parameters and get old / set new parameters
850 * for an integer-valued sysctl function.
851 */
852 int
853 sysctl_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp)
854 {
855 int error = 0;
856
857 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
858 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, int)
859 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, int)
860
861 return (error);
862 }
863
864
865 /*
866 * As above, but read-only.
867 */
868 int
869 sysctl_rdint(void *oldp, size_t *oldlenp, void *newp, int val)
870 {
871 int error = 0;
872
873 if (newp)
874 return (EPERM);
875
876 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, int)
877
878 return (error);
879 }
880
881 /*
882 * Validate parameters and get old / set new parameters
 * for a quad-valued sysctl function.
884 */
885 int
886 sysctl_quad(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
887 quad_t *valp)
888 {
889 int error = 0;
890
891 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, quad_t)
892 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, quad_t)
893 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, quad_t)
894
895 return (error);
896 }
897
898 /*
899 * As above, but read-only.
900 */
901 int
902 sysctl_rdquad(void *oldp, size_t *oldlenp, void *newp, quad_t val)
903 {
904 int error = 0;
905
906 if (newp)
907 return (EPERM);
908
909 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, quad_t)
910
911 return (error);
912 }
913
914 /*
915 * Validate parameters and get old / set new parameters
916 * for a string-valued sysctl function.
917 */
918 int
919 sysctl_string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, char *str,
920 int maxlen)
921 {
922 int len, error = 0, err2 = 0;
923
924 if (newp && newlen >= maxlen)
925 return (EINVAL);
926
927 SYSCTL_STRING_CORE(oldp, oldlenp, str);
928
929 if (error == 0 && newp) {
930 error = copyin(newp, str, newlen);
931 str[newlen] = 0;
932 }
933 return (error);
934 }
935
936 /*
937 * As above, but read-only.
938 */
939 int
940 sysctl_rdstring(void *oldp, size_t *oldlenp, void *newp, const char *str)
941 {
942 int len, error = 0, err2 = 0;
943
944 if (newp)
945 return (EPERM);
946
947 SYSCTL_STRING_CORE(oldp, oldlenp, str);
948
949 return (error);
950 }
951
952 /*
953 * Validate parameters and get old / set new parameters
954 * for a structure oriented sysctl function.
955 */
956 int
957 sysctl_struct(void *oldp, size_t *oldlenp, void *newp, size_t newlen, void *sp,
958 int len)
959 {
960 int error = 0;
961
962 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len)
963 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
964 SYSCTL_SCALAR_NEWPCOP_LEN(newp, sp, len)
965
966 return (error);
967 }
968
969 /*
970 * Validate parameters and get old parameters
971 * for a structure oriented sysctl function.
972 */
973 int
974 sysctl_rdstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
975 int len)
976 {
977 int error = 0;
978
979 if (newp)
980 return (EPERM);
981
982 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
983
984 return (error);
985 }
986
987 /*
988 * As above, but can return a truncated result.
989 */
990 int
991 sysctl_rdminstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
992 int len)
993 {
994 int error = 0;
995
996 if (newp)
997 return (EPERM);
998
999 len = min(*oldlenp, len);
1000 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1001
1002 return (error);
1003 }
1004
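/*
 * Illustrative userland sketch (not part of this file) of the usual
 * two-pass protocol for variable-size nodes such as kern.file: probe
 * the size with a NULL old pointer, allocate, then fetch the data.
 *
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	size_t len;
 *	char *buf;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl size");
 *	if ((buf = malloc(len)) == NULL)
 *		err(1, "malloc");
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl data");
 */
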
1005 /*
1006 * Get file structures.
1007 */
1008 static int
1009 sysctl_file(void *vwhere, size_t *sizep)
1010 {
1011 int buflen, error;
1012 struct file *fp;
1013 char *start, *where;
1014
1015 start = where = vwhere;
1016 buflen = *sizep;
1017 if (where == NULL) {
1018 /*
1019 * overestimate by 10 files
1020 */
1021 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
1022 return (0);
1023 }
1024
1025 /*
1026 * first copyout filehead
1027 */
1028 if (buflen < sizeof(filehead)) {
1029 *sizep = 0;
1030 return (0);
1031 }
1032 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1033 if (error)
1034 return (error);
1035 buflen -= sizeof(filehead);
1036 where += sizeof(filehead);
1037
1038 /*
1039 * followed by an array of file structures
1040 */
1041 for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) {
1042 if (buflen < sizeof(struct file)) {
1043 *sizep = where - start;
1044 return (ENOMEM);
1045 }
1046 error = copyout((caddr_t)fp, where, sizeof(struct file));
1047 if (error)
1048 return (error);
1049 buflen -= sizeof(struct file);
1050 where += sizeof(struct file);
1051 }
1052 *sizep = where - start;
1053 return (0);
1054 }
1055
1056 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
1057 #define FILL_PERM(src, dst) do { \
1058 (dst)._key = (src)._key; \
1059 (dst).uid = (src).uid; \
1060 (dst).gid = (src).gid; \
1061 (dst).cuid = (src).cuid; \
1062 (dst).cgid = (src).cgid; \
1063 (dst).mode = (src).mode; \
1064 (dst)._seq = (src)._seq; \
} while (0)
1066 #define FILL_MSG(src, dst) do { \
1067 FILL_PERM((src).msg_perm, (dst).msg_perm); \
1068 (dst).msg_qnum = (src).msg_qnum; \
1069 (dst).msg_qbytes = (src).msg_qbytes; \
1070 (dst)._msg_cbytes = (src)._msg_cbytes; \
1071 (dst).msg_lspid = (src).msg_lspid; \
1072 (dst).msg_lrpid = (src).msg_lrpid; \
1073 (dst).msg_stime = (src).msg_stime; \
1074 (dst).msg_rtime = (src).msg_rtime; \
1075 (dst).msg_ctime = (src).msg_ctime; \
1076 } while (0)
1077 #define FILL_SEM(src, dst) do { \
1078 FILL_PERM((src).sem_perm, (dst).sem_perm); \
1079 (dst).sem_nsems = (src).sem_nsems; \
1080 (dst).sem_otime = (src).sem_otime; \
1081 (dst).sem_ctime = (src).sem_ctime; \
1082 } while (0)
1083 #define FILL_SHM(src, dst) do { \
1084 FILL_PERM((src).shm_perm, (dst).shm_perm); \
1085 (dst).shm_segsz = (src).shm_segsz; \
1086 (dst).shm_lpid = (src).shm_lpid; \
1087 (dst).shm_cpid = (src).shm_cpid; \
1088 (dst).shm_atime = (src).shm_atime; \
1089 (dst).shm_dtime = (src).shm_dtime; \
1090 (dst).shm_ctime = (src).shm_ctime; \
1091 (dst).shm_nattch = (src).shm_nattch; \
1092 } while (0)
1093
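/*
 * KERN_SYSVIPC_INFO returns an info structure (padded to a 64-bit
 * boundary when id records follow), then one FILL_*-converted record
 * per identifier, so userland never consumes the raw kernel msqid_ds,
 * semid_ds or shmid_ds layouts directly.
 */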
1094 static int
1095 sysctl_sysvipc(int *name, u_int namelen, void *where, size_t *sizep)
1096 {
1097 #ifdef SYSVMSG
1098 struct msg_sysctl_info *msgsi;
1099 #endif
1100 #ifdef SYSVSEM
1101 struct sem_sysctl_info *semsi;
1102 #endif
1103 #ifdef SYSVSHM
1104 struct shm_sysctl_info *shmsi;
1105 #endif
1106 size_t infosize, dssize, tsize, buflen;
1107 void *buf = NULL, *buf2;
1108 char *start;
1109 int32_t nds;
1110 int i, error, ret;
1111
1112 if (namelen != 1)
1113 return (EINVAL);
1114
1115 start = where;
1116 buflen = *sizep;
1117
1118 switch (*name) {
1119 case KERN_SYSVIPC_MSG_INFO:
1120 #ifdef SYSVMSG
1121 infosize = sizeof(msgsi->msginfo);
1122 nds = msginfo.msgmni;
1123 dssize = sizeof(msgsi->msgids[0]);
1124 break;
1125 #else
1126 return (EINVAL);
1127 #endif
1128 case KERN_SYSVIPC_SEM_INFO:
1129 #ifdef SYSVSEM
1130 infosize = sizeof(semsi->seminfo);
1131 nds = seminfo.semmni;
1132 dssize = sizeof(semsi->semids[0]);
1133 break;
1134 #else
1135 return (EINVAL);
1136 #endif
1137 case KERN_SYSVIPC_SHM_INFO:
1138 #ifdef SYSVSHM
1139 infosize = sizeof(shmsi->shminfo);
1140 nds = shminfo.shmmni;
1141 dssize = sizeof(shmsi->shmids[0]);
1142 break;
1143 #else
1144 return (EINVAL);
1145 #endif
1146 default:
1147 return (EINVAL);
1148 }
1149 /*
1150 * Round infosize to 64 bit boundary if requesting more than just
1151 * the info structure or getting the total data size.
1152 */
1153 if (where == NULL || *sizep > infosize)
1154 infosize = ((infosize + 7) / 8) * 8;
1155 tsize = infosize + nds * dssize;
1156
1157 /* Return just the total size required. */
1158 if (where == NULL) {
1159 *sizep = tsize;
1160 return (0);
1161 }
1162
1163 /* Not enough room for even the info struct. */
1164 if (buflen < infosize) {
1165 *sizep = 0;
1166 return (ENOMEM);
1167 }
1168 buf = malloc(min(tsize, buflen), M_TEMP, M_WAITOK);
1169 memset(buf, 0, min(tsize, buflen));
1170
1171 switch (*name) {
1172 #ifdef SYSVMSG
1173 case KERN_SYSVIPC_MSG_INFO:
1174 msgsi = (struct msg_sysctl_info *)buf;
1175 buf2 = &msgsi->msgids[0];
1176 msgsi->msginfo = msginfo;
1177 break;
1178 #endif
1179 #ifdef SYSVSEM
1180 case KERN_SYSVIPC_SEM_INFO:
1181 semsi = (struct sem_sysctl_info *)buf;
1182 buf2 = &semsi->semids[0];
1183 semsi->seminfo = seminfo;
1184 break;
1185 #endif
1186 #ifdef SYSVSHM
1187 case KERN_SYSVIPC_SHM_INFO:
1188 shmsi = (struct shm_sysctl_info *)buf;
1189 buf2 = &shmsi->shmids[0];
1190 shmsi->shminfo = shminfo;
1191 break;
1192 #endif
1193 }
1194 buflen -= infosize;
1195
1196 ret = 0;
1197 if (buflen > 0) {
1198 /* Fill in the IPC data structures. */
1199 for (i = 0; i < nds; i++) {
1200 if (buflen < dssize) {
1201 ret = ENOMEM;
1202 break;
1203 }
1204 switch (*name) {
1205 #ifdef SYSVMSG
1206 case KERN_SYSVIPC_MSG_INFO:
1207 FILL_MSG(msqids[i], msgsi->msgids[i]);
1208 break;
1209 #endif
1210 #ifdef SYSVSEM
1211 case KERN_SYSVIPC_SEM_INFO:
1212 FILL_SEM(sema[i], semsi->semids[i]);
1213 break;
1214 #endif
1215 #ifdef SYSVSHM
1216 case KERN_SYSVIPC_SHM_INFO:
1217 FILL_SHM(shmsegs[i], shmsi->shmids[i]);
1218 break;
1219 #endif
1220 }
1221 buflen -= dssize;
1222 }
1223 }
1224 *sizep -= buflen;
1225 error = copyout(buf, start, *sizep);
1226 /* If copyout succeeded, use return code set earlier. */
1227 if (error == 0)
1228 error = ret;
1229 if (buf)
1230 free(buf, M_TEMP);
1231 return (error);
1232 }
1233 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */
1234
1235 static int
1236 sysctl_msgbuf(void *vwhere, size_t *sizep)
1237 {
1238 char *where = vwhere;
1239 size_t len, maxlen = *sizep;
1240 long beg, end;
1241 int error;
1242
1243 /*
1244 * deal with cases where the message buffer has
1245 * become corrupted.
1246 */
1247 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
1248 msgbufenabled = 0;
1249 return (ENXIO);
1250 }
1251
1252 if (where == NULL) {
1253 /* always return full buffer size */
1254 *sizep = msgbufp->msg_bufs;
1255 return (0);
1256 }
1257
1258 error = 0;
1259 maxlen = min(msgbufp->msg_bufs, maxlen);
1260
	/*
	 * First, copy from the write pointer to the end of the message
	 * buffer; msg_bufx is the next position to be written, so the
	 * oldest data starts there and the copies come back out in
	 * chronological order.
	 */
1265 beg = msgbufp->msg_bufx;
1266 end = msgbufp->msg_bufs;
1267 while (maxlen > 0) {
1268 len = min(end - beg, maxlen);
1269 if (len == 0)
1270 break;
1271 error = copyout(&msgbufp->msg_bufc[beg], where, len);
1272 if (error)
1273 break;
1274 where += len;
1275 maxlen -= len;
1276
1277 /*
1278 * ... then, copy from the beginning of message buffer to
1279 * the write pointer.
1280 */
1281 beg = 0;
1282 end = msgbufp->msg_bufx;
1283 }
1284 return (error);
1285 }
1286
/*
 * Try overestimating by 5 procs: new processes may appear between the
 * size estimate and the retrieval pass.
 */
1290 #define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
1291
1292 static int
1293 sysctl_doeproc(int *name, u_int namelen, void *vwhere, size_t *sizep)
1294 {
1295 struct eproc eproc;
1296 struct kinfo_proc2 kproc2;
1297 struct kinfo_proc *dp;
1298 struct proc *p;
1299 const struct proclist_desc *pd;
1300 char *where, *dp2;
1301 int type, op, arg, elem_size, elem_count;
1302 int buflen, needed, error;
1303
1304 dp = vwhere;
1305 dp2 = where = vwhere;
1306 buflen = where != NULL ? *sizep : 0;
1307 error = needed = 0;
1308 type = name[0];
1309
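	/*
	 * Expected MIB layouts: { KERN_PROC, op, arg } (arg omitted for
	 * KERN_PROC_ALL), or { KERN_PROC2, op, arg, elem_size,
	 * elem_count }, where elem_size/elem_count let the caller use a
	 * kinfo_proc2 of a different (e.g. older, smaller) size.
	 */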
1310 if (type == KERN_PROC) {
1311 if (namelen != 3 && !(namelen == 2 && name[1] == KERN_PROC_ALL))
1312 return (EINVAL);
1313 op = name[1];
1314 if (op != KERN_PROC_ALL)
1315 arg = name[2];
1316 } else {
1317 if (namelen != 5)
1318 return (EINVAL);
1319 op = name[1];
1320 arg = name[2];
1321 elem_size = name[3];
1322 elem_count = name[4];
1323 }
1324
1325 proclist_lock_read();
1326
1327 pd = proclists;
1328 again:
1329 for (p = LIST_FIRST(pd->pd_list); p != NULL; p = LIST_NEXT(p, p_list)) {
1330 /*
1331 * Skip embryonic processes.
1332 */
1333 if (p->p_stat == SIDL)
1334 continue;
1335 /*
1336 * TODO - make more efficient (see notes below).
1337 * do by session.
1338 */
1339 switch (op) {
1340
1341 case KERN_PROC_PID:
1342 /* could do this with just a lookup */
1343 if (p->p_pid != (pid_t)arg)
1344 continue;
1345 break;
1346
1347 case KERN_PROC_PGRP:
1348 /* could do this by traversing pgrp */
1349 if (p->p_pgrp->pg_id != (pid_t)arg)
1350 continue;
1351 break;
1352
1353 case KERN_PROC_SESSION:
1354 if (p->p_session->s_sid != (pid_t)arg)
1355 continue;
1356 break;
1357
1358 case KERN_PROC_TTY:
1359 if (arg == KERN_PROC_TTY_REVOKE) {
1360 if ((p->p_flag & P_CONTROLT) == 0 ||
1361 p->p_session->s_ttyp == NULL ||
1362 p->p_session->s_ttyvp != NULL)
1363 continue;
1364 } else if ((p->p_flag & P_CONTROLT) == 0 ||
1365 p->p_session->s_ttyp == NULL) {
1366 if ((dev_t)arg != KERN_PROC_TTY_NODEV)
1367 continue;
1368 } else if (p->p_session->s_ttyp->t_dev != (dev_t)arg)
1369 continue;
1370 break;
1371
1372 case KERN_PROC_UID:
1373 if (p->p_ucred->cr_uid != (uid_t)arg)
1374 continue;
1375 break;
1376
1377 case KERN_PROC_RUID:
1378 if (p->p_cred->p_ruid != (uid_t)arg)
1379 continue;
1380 break;
1381
1382 case KERN_PROC_GID:
1383 if (p->p_ucred->cr_gid != (uid_t)arg)
1384 continue;
1385 break;
1386
1387 case KERN_PROC_RGID:
1388 if (p->p_cred->p_rgid != (uid_t)arg)
1389 continue;
1390 break;
1391
1392 case KERN_PROC_ALL:
1393 /* allow everything */
1394 break;
1395
1396 default:
1397 error = EINVAL;
1398 goto cleanup;
1399 }
1400 if (type == KERN_PROC) {
1401 if (buflen >= sizeof(struct kinfo_proc)) {
1402 fill_eproc(p, &eproc);
1403 error = copyout((caddr_t)p, &dp->kp_proc,
1404 sizeof(struct proc));
1405 if (error)
1406 goto cleanup;
1407 error = copyout((caddr_t)&eproc, &dp->kp_eproc,
1408 sizeof(eproc));
1409 if (error)
1410 goto cleanup;
1411 dp++;
1412 buflen -= sizeof(struct kinfo_proc);
1413 }
1414 needed += sizeof(struct kinfo_proc);
1415 } else { /* KERN_PROC2 */
1416 if (buflen >= elem_size && elem_count > 0) {
1417 fill_kproc2(p, &kproc2);
1418 /*
1419 * Copy out elem_size, but not larger than
1420 * the size of a struct kinfo_proc2.
1421 */
1422 error = copyout(&kproc2, dp2,
1423 min(sizeof(kproc2), elem_size));
1424 if (error)
1425 goto cleanup;
1426 dp2 += elem_size;
1427 buflen -= elem_size;
1428 elem_count--;
1429 }
1430 needed += elem_size;
1431 }
1432 }
1433 pd++;
1434 if (pd->pd_list != NULL)
1435 goto again;
1436 proclist_unlock_read();
1437
1438 if (where != NULL) {
1439 if (type == KERN_PROC)
1440 *sizep = (caddr_t)dp - where;
1441 else
1442 *sizep = dp2 - where;
1443 if (needed > *sizep)
1444 return (ENOMEM);
1445 } else {
1446 needed += KERN_PROCSLOP;
1447 *sizep = needed;
1448 }
1449 return (0);
1450 cleanup:
1451 proclist_unlock_read();
1452 return (error);
1453 }
1454
1455 /*
1456 * Fill in an eproc structure for the specified process.
1457 */
1458 void
1459 fill_eproc(struct proc *p, struct eproc *ep)
1460 {
1461 struct tty *tp;
1462 struct lwp *l;
1463
1464 ep->e_paddr = p;
1465 ep->e_sess = p->p_session;
1466 ep->e_pcred = *p->p_cred;
1467 ep->e_ucred = *p->p_ucred;
1468 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1469 ep->e_vm.vm_rssize = 0;
1470 ep->e_vm.vm_tsize = 0;
1471 ep->e_vm.vm_dsize = 0;
1472 ep->e_vm.vm_ssize = 0;
1473 /* ep->e_vm.vm_pmap = XXX; */
1474 } else {
1475 struct vmspace *vm = p->p_vmspace;
1476
1477 ep->e_vm.vm_rssize = vm_resident_count(vm);
1478 ep->e_vm.vm_tsize = vm->vm_tsize;
1479 ep->e_vm.vm_dsize = vm->vm_dsize;
1480 ep->e_vm.vm_ssize = vm->vm_ssize;
1481
1482 /* Pick a "representative" LWP */
1483 l = proc_representative_lwp(p);
1484
1485 if (l->l_wmesg)
1486 strncpy(ep->e_wmesg, l->l_wmesg, WMESGLEN);
1487 }
1488 if (p->p_pptr)
1489 ep->e_ppid = p->p_pptr->p_pid;
1490 else
1491 ep->e_ppid = 0;
1492 ep->e_pgid = p->p_pgrp->pg_id;
1493 ep->e_sid = ep->e_sess->s_sid;
1494 ep->e_jobc = p->p_pgrp->pg_jobc;
1495 if ((p->p_flag & P_CONTROLT) &&
1496 (tp = ep->e_sess->s_ttyp)) {
1497 ep->e_tdev = tp->t_dev;
1498 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1499 ep->e_tsess = tp->t_session;
1500 } else
1501 ep->e_tdev = NODEV;
1502
1503 ep->e_xsize = ep->e_xrssize = 0;
1504 ep->e_xccount = ep->e_xswrss = 0;
1505 ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0;
1506 if (SESS_LEADER(p))
1507 ep->e_flag |= EPROC_SLEADER;
1508 strncpy(ep->e_login, ep->e_sess->s_login, MAXLOGNAME);
1509 }
1510
/*
 * Fill in a kinfo_proc2 structure for the specified process.
 */
1514 static void
1515 fill_kproc2(struct proc *p, struct kinfo_proc2 *ki)
1516 {
1517 struct tty *tp;
1518 struct lwp *l;
1519 memset(ki, 0, sizeof(*ki));
1520
1521 /* XXX NJWLWP
1522 * These are likely not what the caller was looking for.
1523 * The perils of playing with the kernel data structures...
1524 */
1525 ki->p_paddr = PTRTOINT64(p);
1526 ki->p_fd = PTRTOINT64(p->p_fd);
1527 ki->p_cwdi = PTRTOINT64(p->p_cwdi);
1528 ki->p_stats = PTRTOINT64(p->p_stats);
1529 ki->p_limit = PTRTOINT64(p->p_limit);
1530 ki->p_vmspace = PTRTOINT64(p->p_vmspace);
1531 ki->p_sigacts = PTRTOINT64(p->p_sigacts);
1532 ki->p_sess = PTRTOINT64(p->p_session);
1533 ki->p_tsess = 0; /* may be changed if controlling tty below */
1534 ki->p_ru = PTRTOINT64(p->p_ru);
1535
1536 ki->p_eflag = 0;
1537 ki->p_exitsig = p->p_exitsig;
1538 ki->p_flag = p->p_flag;
1539
1540 ki->p_pid = p->p_pid;
1541 if (p->p_pptr)
1542 ki->p_ppid = p->p_pptr->p_pid;
1543 else
1544 ki->p_ppid = 0;
1545 ki->p_sid = p->p_session->s_sid;
1546 ki->p__pgid = p->p_pgrp->pg_id;
1547
1548 ki->p_tpgid = NO_PID; /* may be changed if controlling tty below */
1549
1550 ki->p_uid = p->p_ucred->cr_uid;
1551 ki->p_ruid = p->p_cred->p_ruid;
1552 ki->p_gid = p->p_ucred->cr_gid;
1553 ki->p_rgid = p->p_cred->p_rgid;
1554
1555 memcpy(ki->p_groups, p->p_cred->pc_ucred->cr_groups,
1556 min(sizeof(ki->p_groups), sizeof(p->p_cred->pc_ucred->cr_groups)));
1557 ki->p_ngroups = p->p_cred->pc_ucred->cr_ngroups;
1558
1559 ki->p_jobc = p->p_pgrp->pg_jobc;
1560 if ((p->p_flag & P_CONTROLT) && (tp = p->p_session->s_ttyp)) {
1561 ki->p_tdev = tp->t_dev;
1562 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1563 ki->p_tsess = PTRTOINT64(tp->t_session);
1564 } else {
1565 ki->p_tdev = NODEV;
1566 }
1567
1568 ki->p_estcpu = p->p_estcpu;
1569 ki->p_rtime_sec = p->p_rtime.tv_sec;
1570 ki->p_rtime_usec = p->p_rtime.tv_usec;
1571 ki->p_cpticks = p->p_cpticks;
1572 ki->p_pctcpu = p->p_pctcpu;
1573
1574 ki->p_uticks = p->p_uticks;
1575 ki->p_sticks = p->p_sticks;
1576 ki->p_iticks = p->p_iticks;
1577
1578 ki->p_tracep = PTRTOINT64(p->p_tracep);
1579 ki->p_traceflag = p->p_traceflag;
1580
1581
1582 memcpy(&ki->p_siglist, &p->p_sigctx.ps_siglist, sizeof(ki_sigset_t));
1583 memcpy(&ki->p_sigmask, &p->p_sigctx.ps_sigmask, sizeof(ki_sigset_t));
1584 memcpy(&ki->p_sigignore, &p->p_sigctx.ps_sigignore,sizeof(ki_sigset_t));
1585 memcpy(&ki->p_sigcatch, &p->p_sigctx.ps_sigcatch, sizeof(ki_sigset_t));
1586
1587 ki->p_stat = p->p_stat;
1588 ki->p_nice = p->p_nice;
1589
1590 ki->p_xstat = p->p_xstat;
1591 ki->p_acflag = p->p_acflag;
1592
1593 strncpy(ki->p_comm, p->p_comm,
1594 min(sizeof(ki->p_comm), sizeof(p->p_comm)));
1595
1596 strncpy(ki->p_login, p->p_session->s_login, sizeof(ki->p_login));
1597
1598 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1599 ki->p_vm_rssize = 0;
1600 ki->p_vm_tsize = 0;
1601 ki->p_vm_dsize = 0;
1602 ki->p_vm_ssize = 0;
1603 } else {
1604 struct vmspace *vm = p->p_vmspace;
1605
1606 ki->p_vm_rssize = vm_resident_count(vm);
1607 ki->p_vm_tsize = vm->vm_tsize;
1608 ki->p_vm_dsize = vm->vm_dsize;
1609 ki->p_vm_ssize = vm->vm_ssize;
1610
1611 /* Pick a "representative" LWP */
1612 l = proc_representative_lwp(p);
1613 ki->p_forw = PTRTOINT64(l->l_forw);
1614 ki->p_back = PTRTOINT64(l->l_back);
1615 ki->p_addr = PTRTOINT64(l->l_addr);
1616 ki->p_stat = l->l_stat;
1617 ki->p_flag |= l->l_flag;
1618 ki->p_swtime = l->l_swtime;
1619 ki->p_slptime = l->l_slptime;
1620 if (l->l_stat == LSONPROC) {
1621 KDASSERT(l->l_cpu != NULL);
1622 ki->p_schedflags = l->l_cpu->ci_schedstate.spc_flags;
1623 } else
1624 ki->p_schedflags = 0;
1625 ki->p_holdcnt = l->l_holdcnt;
1626 ki->p_priority = l->l_priority;
1627 ki->p_usrpri = l->l_usrpri;
1628 if (l->l_wmesg)
1629 strncpy(ki->p_wmesg, l->l_wmesg, sizeof(ki->p_wmesg));
1630 ki->p_wchan = PTRTOINT64(l->l_wchan);
1631
1632 }
1633
1634 if (p->p_session->s_ttyvp)
1635 ki->p_eflag |= EPROC_CTTY;
1636 if (SESS_LEADER(p))
1637 ki->p_eflag |= EPROC_SLEADER;
1638
1639 /* XXX Is this double check necessary? */
1640 if (P_ZOMBIE(p)) {
1641 ki->p_uvalid = 0;
1642 } else {
1643 ki->p_uvalid = 1;
1644
1645 ki->p_ustart_sec = p->p_stats->p_start.tv_sec;
1646 ki->p_ustart_usec = p->p_stats->p_start.tv_usec;
1647
1648 ki->p_uutime_sec = p->p_stats->p_ru.ru_utime.tv_sec;
1649 ki->p_uutime_usec = p->p_stats->p_ru.ru_utime.tv_usec;
1650 ki->p_ustime_sec = p->p_stats->p_ru.ru_stime.tv_sec;
1651 ki->p_ustime_usec = p->p_stats->p_ru.ru_stime.tv_usec;
1652
1653 ki->p_uru_maxrss = p->p_stats->p_ru.ru_maxrss;
1654 ki->p_uru_ixrss = p->p_stats->p_ru.ru_ixrss;
1655 ki->p_uru_idrss = p->p_stats->p_ru.ru_idrss;
1656 ki->p_uru_isrss = p->p_stats->p_ru.ru_isrss;
1657 ki->p_uru_minflt = p->p_stats->p_ru.ru_minflt;
1658 ki->p_uru_majflt = p->p_stats->p_ru.ru_majflt;
1659 ki->p_uru_nswap = p->p_stats->p_ru.ru_nswap;
1660 ki->p_uru_inblock = p->p_stats->p_ru.ru_inblock;
1661 ki->p_uru_oublock = p->p_stats->p_ru.ru_oublock;
1662 ki->p_uru_msgsnd = p->p_stats->p_ru.ru_msgsnd;
1663 ki->p_uru_msgrcv = p->p_stats->p_ru.ru_msgrcv;
1664 ki->p_uru_nsignals = p->p_stats->p_ru.ru_nsignals;
1665 ki->p_uru_nvcsw = p->p_stats->p_ru.ru_nvcsw;
1666 ki->p_uru_nivcsw = p->p_stats->p_ru.ru_nivcsw;
1667
1668 ki->p_uctime_sec = p->p_stats->p_cru.ru_utime.tv_sec +
1669 p->p_stats->p_cru.ru_stime.tv_sec;
1670 ki->p_uctime_usec = p->p_stats->p_cru.ru_utime.tv_usec +
1671 p->p_stats->p_cru.ru_stime.tv_usec;
1672 }
1673 #ifdef MULTIPROCESSOR
1674 if (p->p_cpu != NULL)
1675 ki->p_cpuid = p->p_cpu->ci_cpuid;
1676 else
1677 #endif
1678 ki->p_cpuid = KI_NOCPU;
1679
1680 }
1681
1682
1683 /*
1684 * Pick a LWP to represent the process for those operations which
1685 * want information about a "process" that is actually associated
1686 * with a LWP.
1687 */
static struct lwp *
proc_representative_lwp(struct proc *p)
{
1691 struct lwp *l = NULL;
1692
1693 /* Trivial case: only one LWP */
1694 if (p->p_nrlwps == 1)
1695 return (LIST_FIRST(&p->p_lwps));
1696
1697 switch (p->p_stat) {
1698 case SSTOP:
1699 /* Pick the first stopped LWP */
1700 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1701 if (l->l_stat == LSSTOP)
1702 return (l);
1703 }
1704 /* NOTREACHED */
1705 break;
1706 case SACTIVE:
1707 /* Pick the first live LWP */
1708 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1709 if (l->l_stat == LSRUN ||
1710 l->l_stat == LSSLEEP ||
1711 l->l_stat == LSONPROC ||
1712 l->l_stat == LSSUSPENDED)
1713 return (l);
1714 }
1715 break;
1716 case SDEAD:
1717 case SZOMB:
1718 /* Doesn't really matter... */
1719 l = LIST_FIRST(&p->p_lwps);
1720 break;
1721 #ifdef DIAGNOSTIC
1722 case SIDL:
1723 /* We have more than one LWP and we're in SIDL?
1724 * How'd that happen?
1725 */
1726 panic("Too many LWPs (%d) in SIDL process %d (%s)",
1727 p->p_nrlwps, p->p_pid, p->p_comm);
1728 default:
1729 panic("Process %d (%s) in unknown state %d",
1730 p->p_pid, p->p_comm, p->p_stat);
1731 #endif
1732 }
1733
1734 panic("proc_representative_lwp: couldn't find a lwp for process"
1735 " %d (%s)", p->p_pid, p->p_comm);
1736 /* NOTREACHED */
1737 return NULL;
1738 }
1739
1740
1741 int
1742 sysctl_procargs(int *name, u_int namelen, void *where, size_t *sizep,
1743 struct proc *up)
1744 {
1745 struct ps_strings pss;
1746 struct proc *p;
1747 size_t len, upper_bound, xlen;
1748 struct uio auio;
1749 struct iovec aiov;
1750 vaddr_t argv;
1751 pid_t pid;
1752 int nargv, type, error, i;
1753 char *arg;
1754 char *tmp;
1755
1756 if (namelen != 2)
1757 return (EINVAL);
1758 pid = name[0];
1759 type = name[1];
1760
1761 switch (type) {
1762 case KERN_PROC_ARGV:
1763 case KERN_PROC_NARGV:
1764 case KERN_PROC_ENV:
1765 case KERN_PROC_NENV:
1766 /* ok */
1767 break;
1768 default:
1769 return (EINVAL);
1770 }
1771
1772 /* check pid */
1773 if ((p = pfind(pid)) == NULL)
1774 return (EINVAL);
1775
	/* Only root or the same user may look at the environment. */
1777 if (type == KERN_PROC_ENV || type == KERN_PROC_NENV) {
1778 if (up->p_ucred->cr_uid != 0) {
1779 if (up->p_cred->p_ruid != p->p_cred->p_ruid ||
1780 up->p_cred->p_ruid != p->p_cred->p_svuid)
1781 return (EPERM);
1782 }
1783 }
1784
1785 if (sizep != NULL && where == NULL) {
1786 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV)
1787 *sizep = sizeof (int);
1788 else
1789 *sizep = ARG_MAX; /* XXX XXX XXX */
1790 return (0);
1791 }
1792 if (where == NULL || sizep == NULL)
1793 return (EINVAL);
1794
1795 /*
1796 * Zombies don't have a stack, so we can't read their psstrings.
1797 * System processes also don't have a user stack.
1798 */
1799 if (P_ZOMBIE(p) || (p->p_flag & P_SYSTEM) != 0)
1800 return (EINVAL);
1801
1802 /*
1803 * Lock the process down in memory.
1804 */
1805 /* XXXCDC: how should locking work here? */
1806 if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
1807 return (EFAULT);
1808
1809 p->p_vmspace->vm_refcnt++; /* XXX */
1810
1811 /*
1812 * Allocate a temporary buffer to hold the arguments.
1813 */
1814 arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
1815
1816 /*
1817 * Read in the ps_strings structure.
1818 */
1819 aiov.iov_base = &pss;
1820 aiov.iov_len = sizeof(pss);
1821 auio.uio_iov = &aiov;
1822 auio.uio_iovcnt = 1;
1823 auio.uio_offset = (vaddr_t)p->p_psstr;
1824 auio.uio_resid = sizeof(pss);
1825 auio.uio_segflg = UIO_SYSSPACE;
1826 auio.uio_rw = UIO_READ;
1827 auio.uio_procp = NULL;
1828 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1829 if (error)
1830 goto done;
1831
1832 if (type == KERN_PROC_ARGV || type == KERN_PROC_NARGV)
1833 memcpy(&nargv, (char *)&pss + p->p_psnargv, sizeof(nargv));
1834 else
1835 memcpy(&nargv, (char *)&pss + p->p_psnenv, sizeof(nargv));
1836 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV) {
1837 error = copyout(&nargv, where, sizeof(nargv));
1838 *sizep = sizeof(nargv);
1839 goto done;
1840 }
1841 /*
1842 * Now read the address of the argument vector.
1843 */
1844 switch (type) {
1845 case KERN_PROC_ARGV:
1846 /* XXX compat32 stuff here */
1847 memcpy(&tmp, (char *)&pss + p->p_psargv, sizeof(tmp));
1848 break;
1849 case KERN_PROC_ENV:
1850 memcpy(&tmp, (char *)&pss + p->p_psenv, sizeof(tmp));
1851 break;
	default:
		/* Not reachable (type was validated above), but don't leak. */
		error = EINVAL;
		goto done;
	}
1855 auio.uio_offset = (off_t)(long)tmp;
1856 aiov.iov_base = &argv;
1857 aiov.iov_len = sizeof(argv);
1858 auio.uio_iov = &aiov;
1859 auio.uio_iovcnt = 1;
1860 auio.uio_resid = sizeof(argv);
1861 auio.uio_segflg = UIO_SYSSPACE;
1862 auio.uio_rw = UIO_READ;
1863 auio.uio_procp = NULL;
1864 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1865 if (error)
1866 goto done;
1867
1868 /*
1869 * Now copy in the actual argument vector, one page at a time,
1870 * since we don't know how long the vector is (though, we do
1871 * know how many NUL-terminated strings are in the vector).
1872 */
1873 len = 0;
1874 upper_bound = *sizep;
1875 for (; nargv != 0 && len < upper_bound; len += xlen) {
1876 aiov.iov_base = arg;
1877 aiov.iov_len = PAGE_SIZE;
1878 auio.uio_iov = &aiov;
1879 auio.uio_iovcnt = 1;
1880 auio.uio_offset = argv + len;
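		/* Read only up to the end of the page containing argv + len. */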
1881 xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
1882 auio.uio_resid = xlen;
1883 auio.uio_segflg = UIO_SYSSPACE;
1884 auio.uio_rw = UIO_READ;
1885 auio.uio_procp = NULL;
1886 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1887 if (error)
1888 goto done;
1889
1890 for (i = 0; i < xlen && nargv != 0; i++) {
1891 if (arg[i] == '\0')
1892 nargv--; /* one full string */
1893 }
1894
1895 /* make sure we don't copyout past the end of the user's buffer */
1896 if (len + i > upper_bound)
1897 i = upper_bound - len;
1898
1899 error = copyout(arg, (char *)where + len, i);
1900 if (error)
1901 break;
1902
1903 if (nargv == 0) {
1904 len += i;
1905 break;
1906 }
1907 }
1908 *sizep = len;
1909
1910 done:
1911 uvmspace_free(p->p_vmspace);
1912
1913 free(arg, M_TEMP);
1914 return (error);
1915 }
1916
1917 #if NPTY > 0
1918 int pty_maxptys(int, int); /* defined in kern/tty_pty.c */
1919
1920 /*
1921 * Validate parameters and get old / set new parameters
1922 * for pty sysctl function.
1923 */
1924 static int
1925 sysctl_pty(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
1926 {
1927 int error = 0;
1928 int oldmax = 0, newmax = 0;
1929
1930 /* get current value of maxptys */
1931 oldmax = pty_maxptys(0, 0);
1932
1933 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &oldmax, int)
1934
1935 if (!error && newp) {
1936 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
1937 SYSCTL_SCALAR_NEWPCOP_TYP(newp, &newmax, int)
1938
1939 if (newmax != pty_maxptys(newmax, (newp != NULL)))
1940 return (EINVAL);
1941
1942 }
1943
1944 return (error);
1945 }
1946 #endif /* NPTY > 0 */
1947