1 /*	$NetBSD: kern_sysctl.c,v 1.115 2002/11/07 00:22:29 manu Exp $	*/
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Mike Karels at Berkeley Software Design, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95
39 */
40
41 /*
42 * sysctl system call.
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: kern_sysctl.c,v 1.115 2002/11/07 00:22:29 manu Exp $");
47
48 #include "opt_ddb.h"
49 #include "opt_insecure.h"
50 #include "opt_defcorename.h"
51 #include "opt_pipe.h"
52 #include "opt_sysv.h"
53 #include "pty.h"
54 #include "rnd.h"
55
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/kernel.h>
59 #include <sys/buf.h>
60 #include <sys/device.h>
61 #include <sys/disklabel.h>
62 #include <sys/dkstat.h>
63 #include <sys/exec.h>
64 #include <sys/file.h>
65 #include <sys/ioctl.h>
66 #include <sys/malloc.h>
67 #include <sys/mount.h>
68 #include <sys/msgbuf.h>
69 #include <sys/pool.h>
70 #include <sys/proc.h>
71 #include <sys/resource.h>
72 #include <sys/resourcevar.h>
73 #include <sys/syscallargs.h>
74 #include <sys/tty.h>
75 #include <sys/unistd.h>
76 #include <sys/vnode.h>
77 #include <sys/socketvar.h>
78 #define __SYSCTL_PRIVATE
79 #include <sys/sysctl.h>
80 #include <sys/lock.h>
81 #include <sys/namei.h>
82
83 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
84 #include <sys/ipc.h>
85 #endif
86 #ifdef SYSVMSG
87 #include <sys/msg.h>
88 #endif
89 #ifdef SYSVSEM
90 #include <sys/sem.h>
91 #endif
92 #ifdef SYSVSHM
93 #include <sys/shm.h>
94 #endif
95
96 #include <dev/cons.h>
97
98 #if defined(DDB)
99 #include <ddb/ddbvar.h>
100 #endif
101
102 #ifndef PIPE_SOCKETPAIR
103 #include <sys/pipe.h>
104 #endif
105
106 #if NRND > 0
107 #include <sys/rnd.h>
108 #endif
109
110 #define PTRTOINT64(foo) ((u_int64_t)(uintptr_t)(foo))
111
112 static int sysctl_file(void *, size_t *);
113 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
114 static int sysctl_sysvipc(int *, u_int, void *, size_t *);
115 #endif
116 static int sysctl_msgbuf(void *, size_t *);
117 static int sysctl_doeproc(int *, u_int, void *, size_t *);
118 static int sysctl_dotkstat(int *, u_int, void *, size_t *, void *);
119 #ifdef MULTIPROCESSOR
120 static int sysctl_docptime(void *, size_t *, void *);
121 static int sysctl_ncpus(void);
122 #endif
123 static void fill_kproc2(struct proc *, struct kinfo_proc2 *);
124 static int sysctl_procargs(int *, u_int, void *, size_t *, struct proc *);
125 #if NPTY > 0
126 static int sysctl_pty(void *, size_t *, void *, size_t);
127 #endif
128
129 /*
130 * The `sysctl_memlock' is intended to keep too many processes from
131 * locking down memory by doing sysctls at once. Whether or not this
132  * is really a good idea to worry about is probably a subject of some
133 * debate.
134 */
135 struct lock sysctl_memlock;
136
137 void
138 sysctl_init(void)
139 {
140
141 lockinit(&sysctl_memlock, PRIBIO|PCATCH, "sysctl", 0, 0);
142 }
143
144 int
145 sys___sysctl(struct proc *p, void *v, register_t *retval)
146 {
147 struct sys___sysctl_args /* {
148 syscallarg(int *) name;
149 syscallarg(u_int) namelen;
150 syscallarg(void *) old;
151 syscallarg(size_t *) oldlenp;
152 syscallarg(void *) new;
153 syscallarg(size_t) newlen;
154 } */ *uap = v;
155 int error;
156 size_t savelen = 0, oldlen = 0;
157 sysctlfn *fn;
158 int name[CTL_MAXNAME];
159 size_t *oldlenp;
160
161 /*
162 * all top-level sysctl names are non-terminal
163 */
164 if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
165 return (EINVAL);
166 error = copyin(SCARG(uap, name), &name,
167 SCARG(uap, namelen) * sizeof(int));
168 if (error)
169 return (error);
170
171 /*
172 * For all but CTL_PROC, must be root to change a value.
173 	 * For CTL_PROC, must be root, or owner of the proc (and not suid);
174 	 * this is checked in proc_sysctl() (once we know the target proc).
175 */
176 if (SCARG(uap, new) != NULL && name[0] != CTL_PROC &&
177 (error = suser(p->p_ucred, &p->p_acflag)))
178 return error;
179
180 switch (name[0]) {
181 case CTL_KERN:
182 fn = kern_sysctl;
183 break;
184 case CTL_HW:
185 fn = hw_sysctl;
186 break;
187 case CTL_VM:
188 fn = uvm_sysctl;
189 break;
190 case CTL_NET:
191 fn = net_sysctl;
192 break;
193 case CTL_VFS:
194 fn = vfs_sysctl;
195 break;
196 case CTL_MACHDEP:
197 fn = cpu_sysctl;
198 break;
199 #ifdef DEBUG
200 case CTL_DEBUG:
201 fn = debug_sysctl;
202 break;
203 #endif
204 #ifdef DDB
205 case CTL_DDB:
206 fn = ddb_sysctl;
207 break;
208 #endif
209 case CTL_PROC:
210 fn = proc_sysctl;
211 break;
212
213 case CTL_EMUL:
214 fn = emul_sysctl;
215 break;
216 default:
217 return (EOPNOTSUPP);
218 }
219
220 /*
221 * XXX Hey, we wire `old', but what about `new'?
222 */
223
224 oldlenp = SCARG(uap, oldlenp);
225 if (oldlenp) {
226 if ((error = copyin(oldlenp, &oldlen, sizeof(oldlen))))
227 return (error);
228 oldlenp = &oldlen;
229 }
230 if (SCARG(uap, old) != NULL) {
231 error = lockmgr(&sysctl_memlock, LK_EXCLUSIVE, NULL);
232 if (error)
233 return (error);
234 error = uvm_vslock(p, SCARG(uap, old), oldlen, VM_PROT_WRITE);
235 if (error) {
236 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
237 return error;
238 }
239 savelen = oldlen;
240 }
241 error = (*fn)(name + 1, SCARG(uap, namelen) - 1, SCARG(uap, old),
242 oldlenp, SCARG(uap, new), SCARG(uap, newlen), p);
243 if (SCARG(uap, old) != NULL) {
244 uvm_vsunlock(p, SCARG(uap, old), savelen);
245 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
246 }
247 if (error)
248 return (error);
249 if (SCARG(uap, oldlenp))
250 error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
251 return (error);
252 }
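
/*
 * Illustrative sketch (not part of this file): a userland caller reaches
 * the dispatch above through the sysctl(3) wrapper.  Reading a read-only
 * string node such as KERN_OSTYPE might look like:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *
 *	int mib[2] = { CTL_KERN, KERN_OSTYPE };
 *	char buf[256];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");		(needs <err.h>)
 *
 * The kernel wires the `old' buffer, strips CTL_KERN from the name and
 * hands the remainder to kern_sysctl() via the fn pointer selected above.
 */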
253
254 /*
255 * Attributes stored in the kernel.
256 */
257 char hostname[MAXHOSTNAMELEN];
258 int hostnamelen;
259
260 char domainname[MAXHOSTNAMELEN];
261 int domainnamelen;
262
263 long hostid;
264
265 #ifdef INSECURE
266 int securelevel = -1;
267 #else
268 int securelevel = 0;
269 #endif
270
271 #ifndef DEFCORENAME
272 #define DEFCORENAME "%n.core"
273 #endif
274 char defcorename[MAXPATHLEN] = DEFCORENAME;
275 int defcorenamelen = sizeof(DEFCORENAME);
276
277 extern int kern_logsigexit;
278 extern fixpt_t ccpu;
279
280 #ifndef MULTIPROCESSOR
281 #define sysctl_ncpus() 1
282 #endif
283
284 #ifdef MULTIPROCESSOR
285
286 #ifndef CPU_INFO_FOREACH
287 #define CPU_INFO_ITERATOR int
288 #define CPU_INFO_FOREACH(cii, ci) cii = 0, ci = curcpu(); ci != NULL; ci = NULL
289 #endif
290
291 static int
292 sysctl_docptime(void *oldp, size_t *oldlenp, void *newp)
293 {
294 u_int64_t cp_time[CPUSTATES];
295 int i;
296 struct cpu_info *ci;
297 CPU_INFO_ITERATOR cii;
298
299 for (i=0; i<CPUSTATES; i++)
300 cp_time[i] = 0;
301
302 for (CPU_INFO_FOREACH(cii, ci)) {
303 for (i=0; i<CPUSTATES; i++)
304 cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
305 }
306 return (sysctl_rdstruct(oldp, oldlenp, newp,
307 cp_time, sizeof(cp_time)));
308 }
309
310 static int
311 sysctl_ncpus(void)
312 {
313 struct cpu_info *ci;
314 CPU_INFO_ITERATOR cii;
315
316 int ncpus = 0;
317 for (CPU_INFO_FOREACH(cii, ci))
318 ncpus++;
319 return ncpus;
320 }
321
322 #endif
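
/*
 * Illustrative note: KERN_CP_TIME below returns the cp_time accumulators
 * (u_int64_t[CPUSTATES] here); on MULTIPROCESSOR kernels they are the
 * per-CPU spc_cp_time counters summed by sysctl_docptime() above.  A
 * userland reader would size the buffer as:
 *
 *	u_int64_t cp_time[CPUSTATES];
 *	int mib[2] = { CTL_KERN, KERN_CP_TIME };
 *	size_t len = sizeof(cp_time);
 *
 *	(void)sysctl(mib, 2, cp_time, &len, NULL, 0);
 */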
323
324 /*
325 * kernel related system variables.
326 */
327 int
328 kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
329 void *newp, size_t newlen, struct proc *p)
330 {
331 int error, level, inthostid;
332 int old_autonicetime;
333 int old_vnodes;
334 dev_t consdev;
335 #if NRND > 0
336 int v;
337 #endif
338
339 /* All sysctl names at this level, except for a few, are terminal. */
340 switch (name[0]) {
341 case KERN_PROC:
342 case KERN_PROC2:
343 case KERN_PROF:
344 case KERN_MBUF:
345 case KERN_PROC_ARGS:
346 case KERN_SYSVIPC_INFO:
347 case KERN_PIPE:
348 case KERN_TKSTAT:
349 /* Not terminal. */
350 break;
351 default:
352 if (namelen != 1)
353 return (ENOTDIR); /* overloaded */
354 }
355
356 switch (name[0]) {
357 case KERN_OSTYPE:
358 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
359 case KERN_OSRELEASE:
360 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
361 case KERN_OSREV:
362 return (sysctl_rdint(oldp, oldlenp, newp, __NetBSD_Version__));
363 case KERN_VERSION:
364 return (sysctl_rdstring(oldp, oldlenp, newp, version));
365 case KERN_MAXVNODES:
366 old_vnodes = desiredvnodes;
367 error = sysctl_int(oldp, oldlenp, newp, newlen, &desiredvnodes);
368 if (newp && !error) {
369 if (old_vnodes > desiredvnodes) {
370 desiredvnodes = old_vnodes;
371 return (EINVAL);
372 }
373 vfs_reinit();
374 nchreinit();
375 }
376 return (error);
377 case KERN_MAXPROC:
378 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc));
379 case KERN_MAXFILES:
380 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
381 case KERN_ARGMAX:
382 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
383 case KERN_SECURELVL:
384 level = securelevel;
385 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
386 newp == NULL)
387 return (error);
388 if (level < securelevel && p->p_pid != 1)
389 return (EPERM);
390 securelevel = level;
391 return (0);
392 case KERN_HOSTNAME:
393 error = sysctl_string(oldp, oldlenp, newp, newlen,
394 hostname, sizeof(hostname));
395 if (newp && !error)
396 hostnamelen = newlen;
397 return (error);
398 case KERN_DOMAINNAME:
399 error = sysctl_string(oldp, oldlenp, newp, newlen,
400 domainname, sizeof(domainname));
401 if (newp && !error)
402 domainnamelen = newlen;
403 return (error);
404 case KERN_HOSTID:
405 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
406 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
407 if (newp && !error)
408 hostid = inthostid;
409 return (error);
410 case KERN_CLOCKRATE:
411 return (sysctl_clockrate(oldp, oldlenp));
412 case KERN_BOOTTIME:
413 return (sysctl_rdstruct(oldp, oldlenp, newp, &boottime,
414 sizeof(struct timeval)));
415 case KERN_VNODE:
416 return (sysctl_vnode(oldp, oldlenp, p));
417 case KERN_PROC:
418 case KERN_PROC2:
419 return (sysctl_doeproc(name, namelen, oldp, oldlenp));
420 case KERN_PROC_ARGS:
421 return (sysctl_procargs(name + 1, namelen - 1,
422 oldp, oldlenp, p));
423 case KERN_FILE:
424 return (sysctl_file(oldp, oldlenp));
425 #ifdef GPROF
426 case KERN_PROF:
427 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
428 newp, newlen));
429 #endif
430 case KERN_POSIX1:
431 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
432 case KERN_NGROUPS:
433 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
434 case KERN_JOB_CONTROL:
435 return (sysctl_rdint(oldp, oldlenp, newp, 1));
436 case KERN_SAVED_IDS:
437 #ifdef _POSIX_SAVED_IDS
438 return (sysctl_rdint(oldp, oldlenp, newp, 1));
439 #else
440 return (sysctl_rdint(oldp, oldlenp, newp, 0));
441 #endif
442 case KERN_MAXPARTITIONS:
443 return (sysctl_rdint(oldp, oldlenp, newp, MAXPARTITIONS));
444 case KERN_RAWPARTITION:
445 return (sysctl_rdint(oldp, oldlenp, newp, RAW_PART));
446 #ifdef NTP
447 case KERN_NTPTIME:
448 return (sysctl_ntptime(oldp, oldlenp));
449 #endif
450 case KERN_AUTONICETIME:
451 old_autonicetime = autonicetime;
452 error = sysctl_int(oldp, oldlenp, newp, newlen, &autonicetime);
453 if (autonicetime < 0)
454 autonicetime = old_autonicetime;
455 return (error);
456 case KERN_AUTONICEVAL:
457 error = sysctl_int(oldp, oldlenp, newp, newlen, &autoniceval);
458 if (autoniceval < PRIO_MIN)
459 autoniceval = PRIO_MIN;
460 if (autoniceval > PRIO_MAX)
461 autoniceval = PRIO_MAX;
462 return (error);
463 case KERN_RTC_OFFSET:
464 return (sysctl_rdint(oldp, oldlenp, newp, rtc_offset));
465 case KERN_ROOT_DEVICE:
466 return (sysctl_rdstring(oldp, oldlenp, newp,
467 root_device->dv_xname));
468 case KERN_MSGBUFSIZE:
469 /*
470 * deal with cases where the message buffer has
471 * become corrupted.
472 */
473 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
474 msgbufenabled = 0;
475 return (ENXIO);
476 }
477 return (sysctl_rdint(oldp, oldlenp, newp, msgbufp->msg_bufs));
478 case KERN_FSYNC:
479 return (sysctl_rdint(oldp, oldlenp, newp, 1));
480 case KERN_SYSVMSG:
481 #ifdef SYSVMSG
482 return (sysctl_rdint(oldp, oldlenp, newp, 1));
483 #else
484 return (sysctl_rdint(oldp, oldlenp, newp, 0));
485 #endif
486 case KERN_SYSVSEM:
487 #ifdef SYSVSEM
488 return (sysctl_rdint(oldp, oldlenp, newp, 1));
489 #else
490 return (sysctl_rdint(oldp, oldlenp, newp, 0));
491 #endif
492 case KERN_SYSVSHM:
493 #ifdef SYSVSHM
494 return (sysctl_rdint(oldp, oldlenp, newp, 1));
495 #else
496 return (sysctl_rdint(oldp, oldlenp, newp, 0));
497 #endif
498 case KERN_DEFCORENAME:
499 if (newp && newlen < 1)
500 return (EINVAL);
501 error = sysctl_string(oldp, oldlenp, newp, newlen,
502 defcorename, sizeof(defcorename));
503 if (newp && !error)
504 defcorenamelen = newlen;
505 return (error);
506 case KERN_SYNCHRONIZED_IO:
507 return (sysctl_rdint(oldp, oldlenp, newp, 1));
508 case KERN_IOV_MAX:
509 return (sysctl_rdint(oldp, oldlenp, newp, IOV_MAX));
510 case KERN_MBUF:
511 return (sysctl_dombuf(name + 1, namelen - 1, oldp, oldlenp,
512 newp, newlen));
513 case KERN_MAPPED_FILES:
514 return (sysctl_rdint(oldp, oldlenp, newp, 1));
515 case KERN_MEMLOCK:
516 return (sysctl_rdint(oldp, oldlenp, newp, 1));
517 case KERN_MEMLOCK_RANGE:
518 return (sysctl_rdint(oldp, oldlenp, newp, 1));
519 case KERN_MEMORY_PROTECTION:
520 return (sysctl_rdint(oldp, oldlenp, newp, 1));
521 case KERN_LOGIN_NAME_MAX:
522 return (sysctl_rdint(oldp, oldlenp, newp, LOGIN_NAME_MAX));
523 case KERN_LOGSIGEXIT:
524 return (sysctl_int(oldp, oldlenp, newp, newlen,
525 &kern_logsigexit));
526 case KERN_FSCALE:
527 return (sysctl_rdint(oldp, oldlenp, newp, FSCALE));
528 case KERN_CCPU:
529 return (sysctl_rdint(oldp, oldlenp, newp, ccpu));
530 case KERN_CP_TIME:
531 #ifndef MULTIPROCESSOR
532 return (sysctl_rdstruct(oldp, oldlenp, newp,
533 curcpu()->ci_schedstate.spc_cp_time,
534 sizeof(curcpu()->ci_schedstate.spc_cp_time)));
535 #else
536 return (sysctl_docptime(oldp, oldlenp, newp));
537 #endif
538 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
539 case KERN_SYSVIPC_INFO:
540 return (sysctl_sysvipc(name + 1, namelen - 1, oldp, oldlenp));
541 #endif
542 case KERN_MSGBUF:
543 return (sysctl_msgbuf(oldp, oldlenp));
544 case KERN_CONSDEV:
545 if (cn_tab != NULL)
546 consdev = cn_tab->cn_dev;
547 else
548 consdev = NODEV;
549 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
550 sizeof consdev));
551 #if NPTY > 0
552 case KERN_MAXPTYS:
553 return sysctl_pty(oldp, oldlenp, newp, newlen);
554 #endif
555 #ifndef PIPE_SOCKETPAIR
556 case KERN_PIPE:
557 return (sysctl_dopipe(name + 1, namelen - 1, oldp, oldlenp,
558 newp, newlen));
559 #endif
560 case KERN_MAXPHYS:
561 return (sysctl_rdint(oldp, oldlenp, newp, MAXPHYS));
562 case KERN_SBMAX:
563 {
564 int new_sbmax = sb_max;
565
566 error = sysctl_int(oldp, oldlenp, newp, newlen, &new_sbmax);
567 if (newp && !error) {
568 if (new_sbmax < (16 * 1024)) /* sanity */
569 return (EINVAL);
570 sb_max = new_sbmax;
571 }
572 return (error);
573 }
574 case KERN_TKSTAT:
575 return (sysctl_dotkstat(name + 1, namelen - 1, oldp, oldlenp,
576 newp));
577 case KERN_MONOTONIC_CLOCK: /* XXX _POSIX_VERSION */
578 return (sysctl_rdint(oldp, oldlenp, newp, 200112));
579 case KERN_URND:
580 #if NRND > 0
581 if (rnd_extract_data(&v, sizeof(v), RND_EXTRACT_ANY) ==
582 sizeof(v))
583 return (sysctl_rdint(oldp, oldlenp, newp, v));
584 else
585 return (EIO); /*XXX*/
586 #else
587 return (EOPNOTSUPP);
588 #endif
589 default:
590 return (EOPNOTSUPP);
591 }
592 /* NOTREACHED */
593 }
594
595 /*
596 * hardware related system variables.
597 */
598 int
599 hw_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
600 void *newp, size_t newlen, struct proc *p)
601 {
602
603 /* All sysctl names at this level, except for a few, are terminal. */
604 switch (name[0]) {
605 case HW_DISKSTATS:
606 /* Not terminal. */
607 break;
608 default:
609 if (namelen != 1)
610 return (ENOTDIR); /* overloaded */
611 }
612
613 switch (name[0]) {
614 case HW_MACHINE:
615 return (sysctl_rdstring(oldp, oldlenp, newp, machine));
616 case HW_MACHINE_ARCH:
617 return (sysctl_rdstring(oldp, oldlenp, newp, machine_arch));
618 case HW_MODEL:
619 return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
620 case HW_NCPU:
621 return (sysctl_rdint(oldp, oldlenp, newp, sysctl_ncpus()));
622 case HW_BYTEORDER:
623 return (sysctl_rdint(oldp, oldlenp, newp, BYTE_ORDER));
624 case HW_PHYSMEM:
625 return (sysctl_rdint(oldp, oldlenp, newp, ctob(physmem)));
626 case HW_USERMEM:
627 return (sysctl_rdint(oldp, oldlenp, newp,
628 ctob(physmem - uvmexp.wired)));
629 case HW_PAGESIZE:
630 return (sysctl_rdint(oldp, oldlenp, newp, PAGE_SIZE));
631 case HW_ALIGNBYTES:
632 return (sysctl_rdint(oldp, oldlenp, newp, ALIGNBYTES));
633 case HW_DISKNAMES:
634 return (sysctl_disknames(oldp, oldlenp));
635 case HW_DISKSTATS:
636 return (sysctl_diskstats(name + 1, namelen - 1, oldp, oldlenp));
637 case HW_CNMAGIC: {
638 char magic[CNS_LEN];
639 int error;
640
641 if (oldp)
642 cn_get_magic(magic, CNS_LEN);
643 error = sysctl_string(oldp, oldlenp, newp, newlen,
644 magic, sizeof(magic));
645 if (newp && !error) {
646 error = cn_set_magic(magic);
647 }
648 return (error);
649 }
650 default:
651 return (EOPNOTSUPP);
652 }
653 /* NOTREACHED */
654 }
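
/*
 * Illustrative sketch: the CTL_HW nodes above are mostly read-only
 * integers, e.g. the amount of physical memory in bytes:
 *
 *	int mib[2] = { CTL_HW, HW_PHYSMEM }, physmem_bytes;
 *	size_t len = sizeof(physmem_bytes);
 *
 *	(void)sysctl(mib, 2, &physmem_bytes, &len, NULL, 0);
 *
 * (HW_PHYSMEM goes through sysctl_rdint(), so the value is truncated to
 * an int on machines with more memory than an int can express.)
 */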
655
656 #ifdef DEBUG
657 /*
658 * Debugging related system variables.
659 */
660 struct ctldebug /* debug0, */ /* debug1, */ debug2, debug3, debug4;
661 struct ctldebug debug5, debug6, debug7, debug8, debug9;
662 struct ctldebug debug10, debug11, debug12, debug13, debug14;
663 struct ctldebug debug15, debug16, debug17, debug18, debug19;
664 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
665 &debug0, &debug1, &debug2, &debug3, &debug4,
666 &debug5, &debug6, &debug7, &debug8, &debug9,
667 &debug10, &debug11, &debug12, &debug13, &debug14,
668 &debug15, &debug16, &debug17, &debug18, &debug19,
669 };
670
671 int
672 debug_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
673 void *newp, size_t newlen, struct proc *p)
674 {
675 struct ctldebug *cdp;
676
677 /* all sysctl names at this level are name and field */
678 if (namelen != 2)
679 return (ENOTDIR); /* overloaded */
680 if (name[0] >= CTL_DEBUG_MAXID)
681 return (EOPNOTSUPP);
682 cdp = debugvars[name[0]];
683 if (cdp->debugname == 0)
684 return (EOPNOTSUPP);
685 switch (name[1]) {
686 case CTL_DEBUG_NAME:
687 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
688 case CTL_DEBUG_VALUE:
689 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
690 default:
691 return (EOPNOTSUPP);
692 }
693 /* NOTREACHED */
694 }
695 #endif /* DEBUG */
696
697 int
698 proc_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
699 void *newp, size_t newlen, struct proc *p)
700 {
701 struct proc *ptmp = NULL;
702 const struct proclist_desc *pd;
703 int error = 0;
704 struct rlimit alim;
705 struct plimit *newplim;
706 char *tmps = NULL;
707 size_t len, curlen;
708 u_int i;
709
710 if (namelen < 2)
711 return EINVAL;
712
713 if (name[0] == PROC_CURPROC) {
714 ptmp = p;
715 } else {
716 proclist_lock_read();
717 for (pd = proclists; pd->pd_list != NULL; pd++) {
718 for (ptmp = LIST_FIRST(pd->pd_list); ptmp != NULL;
719 ptmp = LIST_NEXT(ptmp, p_list)) {
720 /* Skip embryonic processes. */
721 if (ptmp->p_stat == SIDL)
722 continue;
723 if (ptmp->p_pid == (pid_t)name[0])
724 break;
725 }
726 if (ptmp != NULL)
727 break;
728 }
729 proclist_unlock_read();
730 if (ptmp == NULL)
731 return(ESRCH);
732 if (p->p_ucred->cr_uid != 0) {
733 			if (p->p_cred->p_ruid != ptmp->p_cred->p_ruid ||
734 p->p_cred->p_ruid != ptmp->p_cred->p_svuid)
735 return EPERM;
736 if (ptmp->p_cred->p_rgid != ptmp->p_cred->p_svgid)
737 return EPERM; /* sgid proc */
738 for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
739 if (p->p_ucred->cr_groups[i] ==
740 ptmp->p_cred->p_rgid)
741 break;
742 }
743 if (i == p->p_ucred->cr_ngroups)
744 return EPERM;
745 }
746 }
747 switch(name[1]) {
748 case PROC_PID_STOPFORK:
749 if (namelen != 2)
750 return EINVAL;
751 i = ((ptmp->p_flag & P_STOPFORK) != 0);
752 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &i)) != 0)
753 return error;
754 if (i != 0)
755 ptmp->p_flag |= P_STOPFORK;
756 else
757 ptmp->p_flag &= ~P_STOPFORK;
758 return 0;
759 break;
760
761 case PROC_PID_STOPEXEC:
762 if (namelen != 2)
763 return EINVAL;
764 i = ((ptmp->p_flag & P_STOPEXEC) != 0);
765 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &i)) != 0)
766 return error;
767 if (i != 0)
768 ptmp->p_flag |= P_STOPEXEC;
769 else
770 ptmp->p_flag &= ~P_STOPEXEC;
771 return 0;
772 break;
773
774 case PROC_PID_CORENAME:
775 if (namelen != 2)
776 return EINVAL;
777 /*
778 * Can't use sysctl_string() here because we may malloc a new
779 * area during the process, so we have to do it by hand.
780 */
781 curlen = strlen(ptmp->p_limit->pl_corename) + 1;
782 if (oldlenp && *oldlenp < curlen) {
783 if (!oldp)
784 *oldlenp = curlen;
785 return (ENOMEM);
786 }
787 if (newp) {
788 if (securelevel > 2)
789 return EPERM;
790 if (newlen > MAXPATHLEN)
791 return ENAMETOOLONG;
792 tmps = malloc(newlen + 1, M_TEMP, M_WAITOK);
793 if (tmps == NULL)
794 return ENOMEM;
795 error = copyin(newp, tmps, newlen + 1);
796 tmps[newlen] = '\0';
797 if (error)
798 goto cleanup;
799 			/* Enforce the name to be 'core', or to end in '.core' or '/core' */
800 if (newlen < 4) { /* c.o.r.e */
801 error = EINVAL;
802 goto cleanup;
803 }
804 len = newlen - 4;
805 if (len > 0) {
806 if (tmps[len - 1] != '.' &&
807 tmps[len - 1] != '/') {
808 error = EINVAL;
809 goto cleanup;
810 }
811 }
812 if (strcmp(&tmps[len], "core") != 0) {
813 error = EINVAL;
814 goto cleanup;
815 }
816 }
817 if (oldp && oldlenp) {
818 *oldlenp = curlen;
819 error = copyout(ptmp->p_limit->pl_corename, oldp,
820 curlen);
821 }
822 if (newp && error == 0) {
823 /* if the 2 strings are identical, don't limcopy() */
824 if (strcmp(tmps, ptmp->p_limit->pl_corename) == 0) {
825 error = 0;
826 goto cleanup;
827 }
828 if (ptmp->p_limit->p_refcnt > 1 &&
829 (ptmp->p_limit->p_lflags & PL_SHAREMOD) == 0) {
830 newplim = limcopy(ptmp->p_limit);
831 limfree(ptmp->p_limit);
832 ptmp->p_limit = newplim;
833 }
834 if (ptmp->p_limit->pl_corename != defcorename) {
835 free(ptmp->p_limit->pl_corename, M_TEMP);
836 }
837 ptmp->p_limit->pl_corename = tmps;
838 return (0);
839 }
840 cleanup:
841 if (tmps)
842 free(tmps, M_TEMP);
843 return (error);
844 break;
845
846 case PROC_PID_LIMIT:
847 if (namelen != 4 || name[2] >= PROC_PID_LIMIT_MAXID)
848 return EINVAL;
849 memcpy(&alim, &ptmp->p_rlimit[name[2] - 1], sizeof(alim));
850 if (name[3] == PROC_PID_LIMIT_TYPE_HARD)
851 error = sysctl_quad(oldp, oldlenp, newp, newlen,
852 &alim.rlim_max);
853 else if (name[3] == PROC_PID_LIMIT_TYPE_SOFT)
854 error = sysctl_quad(oldp, oldlenp, newp, newlen,
855 &alim.rlim_cur);
856 else
857 error = EINVAL;
858
859 if (error)
860 return error;
861
862 if (newp)
863 error = dosetrlimit(ptmp, p->p_cred,
864 name[2] - 1, &alim);
865 return error;
866 break;
867
868 default:
869 return (EINVAL);
870 break;
871 }
872 /* NOTREACHED */
873 return (EINVAL);
874 }
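
/*
 * Illustrative sketch: CTL_PROC nodes are addressed by pid (or
 * PROC_CURPROC) followed by the per-process variable.  Reading the
 * current process' core file name template handled above:
 *
 *	int mib[3] = { CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME };
 *	char corename[MAXPATHLEN];
 *	size_t len = sizeof(corename);
 *
 *	(void)sysctl(mib, 3, corename, &len, NULL, 0);
 *
 * Setting it uses the same node with a `new' buffer and is restricted to
 * names that are 'core' or end in '.core' or '/core'.
 */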
875
876 int
877 emul_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
878 void *newp, size_t newlen, struct proc *p)
879 {
880 static struct {
881 const char *name;
882 int type;
883 } emulations[] = CTL_EMUL_NAMES;
884 const struct emul *e;
885 const char *ename;
886 #ifdef LKM
887 extern struct lock exec_lock; /* XXX */
888 int error;
889 #else
890 extern int nexecs_builtin;
891 extern const struct execsw execsw_builtin[];
892 int i;
893 #endif
894
895 /* all sysctl names at this level are name and field */
896 if (namelen < 2)
897 return (ENOTDIR); /* overloaded */
898
899 if ((u_int) name[0] >= EMUL_MAXID || name[0] == 0)
900 return (EOPNOTSUPP);
901
902 ename = emulations[name[0]].name;
903
904 #ifdef LKM
905 lockmgr(&exec_lock, LK_SHARED, NULL);
906 if ((e = emul_search(ename))) {
907 error = (*e->e_sysctl)(name + 1, namelen - 1, oldp, oldlenp,
908 newp, newlen, p);
909 } else
910 error = EOPNOTSUPP;
911 lockmgr(&exec_lock, LK_RELEASE, NULL);
912
913 return (error);
914 #else
915 for (i = 0; i < nexecs_builtin; i++) {
916 e = execsw_builtin[i].es_emul;
917 if (e == NULL || strcmp(ename, e->e_name) != 0 ||
918 		    e->e_sysctl == NULL)
919 continue;
920
921 return (*e->e_sysctl)(name + 1, namelen - 1, oldp, oldlenp,
922 newp, newlen, p);
923 }
924
925 return (EOPNOTSUPP);
926 #endif
927 }
928 /*
929 * Convenience macros.
930 */
931
932 #define SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, len) \
933 if (oldlenp) { \
934 if (!oldp) \
935 *oldlenp = len; \
936 else { \
937 if (*oldlenp < len) \
938 return(ENOMEM); \
939 *oldlenp = len; \
940 error = copyout((caddr_t)valp, oldp, len); \
941 } \
942 }
943
944 #define SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, typ) \
945 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, sizeof(typ))
946
947 #define SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len) \
948 if (newp && newlen != len) \
949 return (EINVAL);
950
951 #define SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, typ) \
952 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, sizeof(typ))
953
954 #define SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, len) \
955 if (error == 0 && newp) \
956 error = copyin(newp, valp, len);
957
958 #define SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, typ) \
959 SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, sizeof(typ))
960
961 #define SYSCTL_STRING_CORE(oldp, oldlenp, str) \
962 if (oldlenp) { \
963 len = strlen(str) + 1; \
964 if (!oldp) \
965 *oldlenp = len; \
966 else { \
967 if (*oldlenp < len) { \
968 err2 = ENOMEM; \
969 len = *oldlenp; \
970 } else \
971 *oldlenp = len; \
972 error = copyout(str, oldp, len);\
973 if (error == 0) \
974 error = err2; \
975 } \
976 }
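
/*
 * All of the sysctl_*() helpers below follow the same protocol, which the
 * macros above implement: if `oldp' is NULL the caller is only probing for
 * the size, so *oldlenp is set and nothing is copied; if the supplied
 * buffer is too small, ENOMEM is returned; otherwise the current value is
 * copied out, and any `new' value is validated and copied in last.  A
 * typical terminal-node handler therefore reduces to, e.g.:
 *
 *	case KERN_MAXPROC:
 *		return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc));
 */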
977
978 /*
979 * Validate parameters and get old / set new parameters
980 * for an integer-valued sysctl function.
981 */
982 int
983 sysctl_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp)
984 {
985 int error = 0;
986
987 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
988 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, int)
989 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, int)
990
991 return (error);
992 }
993
994
995 /*
996 * As above, but read-only.
997 */
998 int
999 sysctl_rdint(void *oldp, size_t *oldlenp, void *newp, int val)
1000 {
1001 int error = 0;
1002
1003 if (newp)
1004 return (EPERM);
1005
1006 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, int)
1007
1008 return (error);
1009 }
1010
1011 /*
1012 * Validate parameters and get old / set new parameters
1013  * for a quad-valued sysctl function.
1014 */
1015 int
1016 sysctl_quad(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
1017 quad_t *valp)
1018 {
1019 int error = 0;
1020
1021 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, quad_t)
1022 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, quad_t)
1023 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, quad_t)
1024
1025 return (error);
1026 }
1027
1028 /*
1029 * As above, but read-only.
1030 */
1031 int
1032 sysctl_rdquad(void *oldp, size_t *oldlenp, void *newp, quad_t val)
1033 {
1034 int error = 0;
1035
1036 if (newp)
1037 return (EPERM);
1038
1039 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, quad_t)
1040
1041 return (error);
1042 }
1043
1044 /*
1045 * Validate parameters and get old / set new parameters
1046 * for a string-valued sysctl function.
1047 */
1048 int
1049 sysctl_string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, char *str,
1050 size_t maxlen)
1051 {
1052 int error = 0, err2 = 0;
1053 size_t len;
1054
1055 if (newp && newlen >= maxlen)
1056 return (EINVAL);
1057
1058 SYSCTL_STRING_CORE(oldp, oldlenp, str);
1059
1060 if (error == 0 && newp) {
1061 error = copyin(newp, str, newlen);
1062 str[newlen] = 0;
1063 }
1064 return (error);
1065 }
1066
1067 /*
1068 * As above, but read-only.
1069 */
1070 int
1071 sysctl_rdstring(void *oldp, size_t *oldlenp, void *newp, const char *str)
1072 {
1073 int error = 0, err2 = 0;
1074 size_t len;
1075
1076 if (newp)
1077 return (EPERM);
1078
1079 SYSCTL_STRING_CORE(oldp, oldlenp, str);
1080
1081 return (error);
1082 }
1083
1084 /*
1085 * Validate parameters and get old / set new parameters
1086 * for a structure oriented sysctl function.
1087 */
1088 int
1089 sysctl_struct(void *oldp, size_t *oldlenp, void *newp, size_t newlen, void *sp,
1090 size_t len)
1091 {
1092 int error = 0;
1093
1094 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len)
1095 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1096 SYSCTL_SCALAR_NEWPCOP_LEN(newp, sp, len)
1097
1098 return (error);
1099 }
1100
1101 /*
1102 * Validate parameters and get old parameters
1103 * for a structure oriented sysctl function.
1104 */
1105 int
1106 sysctl_rdstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
1107 size_t len)
1108 {
1109 int error = 0;
1110
1111 if (newp)
1112 return (EPERM);
1113
1114 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1115
1116 return (error);
1117 }
1118
1119 /*
1120 * As above, but can return a truncated result.
1121 */
1122 int
1123 sysctl_rdminstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
1124 size_t len)
1125 {
1126 int error = 0;
1127
1128 if (newp)
1129 return (EPERM);
1130
1131 len = min(*oldlenp, len);
1132 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1133
1134 return (error);
1135 }
1136
1137 /*
1138 * Get file structures.
1139 */
1140 static int
1141 sysctl_file(void *vwhere, size_t *sizep)
1142 {
1143 int error;
1144 size_t buflen;
1145 struct file *fp;
1146 char *start, *where;
1147
1148 start = where = vwhere;
1149 buflen = *sizep;
1150 if (where == NULL) {
1151 /*
1152 * overestimate by 10 files
1153 */
1154 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
1155 return (0);
1156 }
1157
1158 /*
1159 * first copyout filehead
1160 */
1161 if (buflen < sizeof(filehead)) {
1162 *sizep = 0;
1163 return (0);
1164 }
1165 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1166 if (error)
1167 return (error);
1168 buflen -= sizeof(filehead);
1169 where += sizeof(filehead);
1170
1171 /*
1172 * followed by an array of file structures
1173 */
1174 LIST_FOREACH(fp, &filehead, f_list) {
1175 if (buflen < sizeof(struct file)) {
1176 *sizep = where - start;
1177 return (ENOMEM);
1178 }
1179 error = copyout((caddr_t)fp, where, sizeof(struct file));
1180 if (error)
1181 return (error);
1182 buflen -= sizeof(struct file);
1183 where += sizeof(struct file);
1184 }
1185 *sizep = where - start;
1186 return (0);
1187 }
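
/*
 * Illustrative sketch: consumers of KERN_FILE make two passes, first with
 * a NULL buffer to learn an (over)estimated size, then with a buffer that
 * receives `filehead' followed by the array of struct file copies:
 *
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	char *buf;
 *	size_t len;
 *
 *	(void)sysctl(mib, 2, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	(void)sysctl(mib, 2, buf, &len, NULL, 0);
 */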
1188
1189 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
1190 #define FILL_PERM(src, dst) do { \
1191 (dst)._key = (src)._key; \
1192 (dst).uid = (src).uid; \
1193 (dst).gid = (src).gid; \
1194 (dst).cuid = (src).cuid; \
1195 (dst).cgid = (src).cgid; \
1196 (dst).mode = (src).mode; \
1197 (dst)._seq = (src)._seq; \
1198 } while (/*CONSTCOND*/ 0);
1199 #define FILL_MSG(src, dst) do { \
1200 FILL_PERM((src).msg_perm, (dst).msg_perm); \
1201 (dst).msg_qnum = (src).msg_qnum; \
1202 (dst).msg_qbytes = (src).msg_qbytes; \
1203 (dst)._msg_cbytes = (src)._msg_cbytes; \
1204 (dst).msg_lspid = (src).msg_lspid; \
1205 (dst).msg_lrpid = (src).msg_lrpid; \
1206 (dst).msg_stime = (src).msg_stime; \
1207 (dst).msg_rtime = (src).msg_rtime; \
1208 (dst).msg_ctime = (src).msg_ctime; \
1209 } while (/*CONSTCOND*/ 0)
1210 #define FILL_SEM(src, dst) do { \
1211 FILL_PERM((src).sem_perm, (dst).sem_perm); \
1212 (dst).sem_nsems = (src).sem_nsems; \
1213 (dst).sem_otime = (src).sem_otime; \
1214 (dst).sem_ctime = (src).sem_ctime; \
1215 } while (/*CONSTCOND*/ 0)
1216 #define FILL_SHM(src, dst) do { \
1217 FILL_PERM((src).shm_perm, (dst).shm_perm); \
1218 (dst).shm_segsz = (src).shm_segsz; \
1219 (dst).shm_lpid = (src).shm_lpid; \
1220 (dst).shm_cpid = (src).shm_cpid; \
1221 (dst).shm_atime = (src).shm_atime; \
1222 (dst).shm_dtime = (src).shm_dtime; \
1223 (dst).shm_ctime = (src).shm_ctime; \
1224 (dst).shm_nattch = (src).shm_nattch; \
1225 } while (/*CONSTCOND*/ 0)
1226
1227 static int
1228 sysctl_sysvipc(int *name, u_int namelen, void *where, size_t *sizep)
1229 {
1230 #ifdef SYSVMSG
1231 struct msg_sysctl_info *msgsi;
1232 #endif
1233 #ifdef SYSVSEM
1234 struct sem_sysctl_info *semsi;
1235 #endif
1236 #ifdef SYSVSHM
1237 struct shm_sysctl_info *shmsi;
1238 #endif
1239 size_t infosize, dssize, tsize, buflen;
1240 void *buf = NULL;
1241 char *start;
1242 int32_t nds;
1243 int i, error, ret;
1244
1245 if (namelen != 1)
1246 return (EINVAL);
1247
1248 start = where;
1249 buflen = *sizep;
1250
1251 switch (*name) {
1252 case KERN_SYSVIPC_MSG_INFO:
1253 #ifdef SYSVMSG
1254 infosize = sizeof(msgsi->msginfo);
1255 nds = msginfo.msgmni;
1256 dssize = sizeof(msgsi->msgids[0]);
1257 break;
1258 #else
1259 return (EINVAL);
1260 #endif
1261 case KERN_SYSVIPC_SEM_INFO:
1262 #ifdef SYSVSEM
1263 infosize = sizeof(semsi->seminfo);
1264 nds = seminfo.semmni;
1265 dssize = sizeof(semsi->semids[0]);
1266 break;
1267 #else
1268 return (EINVAL);
1269 #endif
1270 case KERN_SYSVIPC_SHM_INFO:
1271 #ifdef SYSVSHM
1272 infosize = sizeof(shmsi->shminfo);
1273 nds = shminfo.shmmni;
1274 dssize = sizeof(shmsi->shmids[0]);
1275 break;
1276 #else
1277 return (EINVAL);
1278 #endif
1279 default:
1280 return (EINVAL);
1281 }
1282 /*
1283 * Round infosize to 64 bit boundary if requesting more than just
1284 * the info structure or getting the total data size.
1285 */
1286 if (where == NULL || *sizep > infosize)
1287 infosize = ((infosize + 7) / 8) * 8;
1288 tsize = infosize + nds * dssize;
1289
1290 /* Return just the total size required. */
1291 if (where == NULL) {
1292 *sizep = tsize;
1293 return (0);
1294 }
1295
1296 /* Not enough room for even the info struct. */
1297 if (buflen < infosize) {
1298 *sizep = 0;
1299 return (ENOMEM);
1300 }
1301 buf = malloc(min(tsize, buflen), M_TEMP, M_WAITOK);
1302 memset(buf, 0, min(tsize, buflen));
1303
1304 switch (*name) {
1305 #ifdef SYSVMSG
1306 case KERN_SYSVIPC_MSG_INFO:
1307 msgsi = (struct msg_sysctl_info *)buf;
1308 msgsi->msginfo = msginfo;
1309 break;
1310 #endif
1311 #ifdef SYSVSEM
1312 case KERN_SYSVIPC_SEM_INFO:
1313 semsi = (struct sem_sysctl_info *)buf;
1314 semsi->seminfo = seminfo;
1315 break;
1316 #endif
1317 #ifdef SYSVSHM
1318 case KERN_SYSVIPC_SHM_INFO:
1319 shmsi = (struct shm_sysctl_info *)buf;
1320 shmsi->shminfo = shminfo;
1321 break;
1322 #endif
1323 }
1324 buflen -= infosize;
1325
1326 ret = 0;
1327 if (buflen > 0) {
1328 /* Fill in the IPC data structures. */
1329 for (i = 0; i < nds; i++) {
1330 if (buflen < dssize) {
1331 ret = ENOMEM;
1332 break;
1333 }
1334 switch (*name) {
1335 #ifdef SYSVMSG
1336 case KERN_SYSVIPC_MSG_INFO:
1337 FILL_MSG(msqids[i], msgsi->msgids[i]);
1338 break;
1339 #endif
1340 #ifdef SYSVSEM
1341 case KERN_SYSVIPC_SEM_INFO:
1342 FILL_SEM(sema[i], semsi->semids[i]);
1343 break;
1344 #endif
1345 #ifdef SYSVSHM
1346 case KERN_SYSVIPC_SHM_INFO:
1347 FILL_SHM(shmsegs[i], shmsi->shmids[i]);
1348 break;
1349 #endif
1350 }
1351 buflen -= dssize;
1352 }
1353 }
1354 *sizep -= buflen;
1355 error = copyout(buf, start, *sizep);
1356 /* If copyout succeeded, use return code set earlier. */
1357 if (error == 0)
1358 error = ret;
1359 if (buf)
1360 free(buf, M_TEMP);
1361 return (error);
1362 }
1363 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */
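
/*
 * Note on the buffer produced by sysctl_sysvipc() above: it is laid out as
 * the appropriate info structure (msginfo/seminfo/shminfo), padded to an
 * 8-byte boundary, immediately followed by `nds' id structures.  A caller
 * passing a NULL buffer gets just that total size back, the usual two-pass
 * pattern for variable-sized nodes in this file.
 */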
1364
1365 static int
1366 sysctl_msgbuf(void *vwhere, size_t *sizep)
1367 {
1368 char *where = vwhere;
1369 size_t len, maxlen = *sizep;
1370 long beg, end;
1371 int error;
1372
1373 /*
1374 * deal with cases where the message buffer has
1375 * become corrupted.
1376 */
1377 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
1378 msgbufenabled = 0;
1379 return (ENXIO);
1380 }
1381
1382 if (where == NULL) {
1383 /* always return full buffer size */
1384 *sizep = msgbufp->msg_bufs;
1385 return (0);
1386 }
1387
1388 error = 0;
1389 maxlen = min(msgbufp->msg_bufs, maxlen);
1390
1391 /*
1392 * First, copy from the write pointer to the end of
1393 * message buffer.
1394 */
1395 beg = msgbufp->msg_bufx;
1396 end = msgbufp->msg_bufs;
1397 while (maxlen > 0) {
1398 len = min(end - beg, maxlen);
1399 if (len == 0)
1400 break;
1401 error = copyout(&msgbufp->msg_bufc[beg], where, len);
1402 if (error)
1403 break;
1404 where += len;
1405 maxlen -= len;
1406
1407 /*
1408 * ... then, copy from the beginning of message buffer to
1409 * the write pointer.
1410 */
1411 beg = 0;
1412 end = msgbufp->msg_bufx;
1413 }
1414 return (error);
1415 }
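
/*
 * Illustrative sketch: KERN_MSGBUF linearizes the circular kernel message
 * buffer starting at the write pointer, so reading it is again a two-pass
 * operation (this is how dmesg-style tools can obtain the buffer):
 *
 *	int mib[2] = { CTL_KERN, KERN_MSGBUF };
 *	char *buf;
 *	size_t len;
 *
 *	(void)sysctl(mib, 2, NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	(void)sysctl(mib, 2, buf, &len, NULL, 0);
 */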
1416
1417 /*
1418  * try overestimating by 5 procs
1419 */
1420 #define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
1421
1422 static int
1423 sysctl_doeproc(int *name, u_int namelen, void *vwhere, size_t *sizep)
1424 {
1425 struct eproc eproc;
1426 struct kinfo_proc2 kproc2;
1427 struct kinfo_proc *dp;
1428 struct proc *p;
1429 const struct proclist_desc *pd;
1430 char *where, *dp2;
1431 int type, op, arg;
1432 u_int elem_size, elem_count;
1433 size_t buflen, needed;
1434 int error;
1435
1436 dp = vwhere;
1437 dp2 = where = vwhere;
1438 buflen = where != NULL ? *sizep : 0;
1439 error = 0;
1440 needed = 0;
1441 type = name[0];
1442
1443 if (type == KERN_PROC) {
1444 if (namelen != 3 && !(namelen == 2 && name[1] == KERN_PROC_ALL))
1445 return (EINVAL);
1446 op = name[1];
1447 if (op != KERN_PROC_ALL)
1448 arg = name[2];
1449 } else {
1450 if (namelen != 5)
1451 return (EINVAL);
1452 op = name[1];
1453 arg = name[2];
1454 elem_size = name[3];
1455 elem_count = name[4];
1456 }
1457
1458 proclist_lock_read();
1459
1460 pd = proclists;
1461 again:
1462 for (p = LIST_FIRST(pd->pd_list); p != NULL; p = LIST_NEXT(p, p_list)) {
1463 /*
1464 * Skip embryonic processes.
1465 */
1466 if (p->p_stat == SIDL)
1467 continue;
1468 /*
1469 * TODO - make more efficient (see notes below).
1470 * do by session.
1471 */
1472 switch (op) {
1473
1474 case KERN_PROC_PID:
1475 /* could do this with just a lookup */
1476 if (p->p_pid != (pid_t)arg)
1477 continue;
1478 break;
1479
1480 case KERN_PROC_PGRP:
1481 /* could do this by traversing pgrp */
1482 if (p->p_pgrp->pg_id != (pid_t)arg)
1483 continue;
1484 break;
1485
1486 case KERN_PROC_SESSION:
1487 if (p->p_session->s_sid != (pid_t)arg)
1488 continue;
1489 break;
1490
1491 case KERN_PROC_TTY:
1492 if (arg == (int) KERN_PROC_TTY_REVOKE) {
1493 if ((p->p_flag & P_CONTROLT) == 0 ||
1494 p->p_session->s_ttyp == NULL ||
1495 p->p_session->s_ttyvp != NULL)
1496 continue;
1497 } else if ((p->p_flag & P_CONTROLT) == 0 ||
1498 p->p_session->s_ttyp == NULL) {
1499 if ((dev_t)arg != KERN_PROC_TTY_NODEV)
1500 continue;
1501 } else if (p->p_session->s_ttyp->t_dev != (dev_t)arg)
1502 continue;
1503 break;
1504
1505 case KERN_PROC_UID:
1506 if (p->p_ucred->cr_uid != (uid_t)arg)
1507 continue;
1508 break;
1509
1510 case KERN_PROC_RUID:
1511 if (p->p_cred->p_ruid != (uid_t)arg)
1512 continue;
1513 break;
1514
1515 case KERN_PROC_GID:
1516 if (p->p_ucred->cr_gid != (uid_t)arg)
1517 continue;
1518 break;
1519
1520 case KERN_PROC_RGID:
1521 if (p->p_cred->p_rgid != (uid_t)arg)
1522 continue;
1523 break;
1524
1525 case KERN_PROC_ALL:
1526 /* allow everything */
1527 break;
1528
1529 default:
1530 error = EINVAL;
1531 goto cleanup;
1532 }
1533 if (type == KERN_PROC) {
1534 if (buflen >= sizeof(struct kinfo_proc)) {
1535 fill_eproc(p, &eproc);
1536 error = copyout((caddr_t)p, &dp->kp_proc,
1537 sizeof(struct proc));
1538 if (error)
1539 goto cleanup;
1540 error = copyout((caddr_t)&eproc, &dp->kp_eproc,
1541 sizeof(eproc));
1542 if (error)
1543 goto cleanup;
1544 dp++;
1545 buflen -= sizeof(struct kinfo_proc);
1546 }
1547 needed += sizeof(struct kinfo_proc);
1548 } else { /* KERN_PROC2 */
1549 if (buflen >= elem_size && elem_count > 0) {
1550 fill_kproc2(p, &kproc2);
1551 /*
1552 * Copy out elem_size, but not larger than
1553 * the size of a struct kinfo_proc2.
1554 */
1555 error = copyout(&kproc2, dp2,
1556 min(sizeof(kproc2), elem_size));
1557 if (error)
1558 goto cleanup;
1559 dp2 += elem_size;
1560 buflen -= elem_size;
1561 elem_count--;
1562 }
1563 needed += elem_size;
1564 }
1565 }
1566 pd++;
1567 if (pd->pd_list != NULL)
1568 goto again;
1569 proclist_unlock_read();
1570
1571 if (where != NULL) {
1572 if (type == KERN_PROC)
1573 *sizep = (caddr_t)dp - where;
1574 else
1575 *sizep = dp2 - where;
1576 if (needed > *sizep)
1577 return (ENOMEM);
1578 } else {
1579 needed += KERN_PROCSLOP;
1580 *sizep = needed;
1581 }
1582 return (0);
1583 cleanup:
1584 proclist_unlock_read();
1585 return (error);
1586 }
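
/*
 * Illustrative sketch: KERN_PROC2 encodes the element size and count in
 * the name itself (name[3] and name[4] above), which lets old binaries
 * keep working when struct kinfo_proc2 grows.  Listing all processes:
 *
 *	struct kinfo_proc2 *kp;
 *	int mib[6] = { CTL_KERN, KERN_PROC2, KERN_PROC_ALL, 0,
 *	    sizeof(struct kinfo_proc2), 0 };
 *	size_t len;
 *
 *	(void)sysctl(mib, 6, NULL, &len, NULL, 0);
 *	kp = malloc(len);
 *	mib[5] = len / sizeof(struct kinfo_proc2);
 *	(void)sysctl(mib, 6, kp, &len, NULL, 0);
 */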
1587
1588 /*
1589 * Fill in an eproc structure for the specified process.
1590 */
1591 void
1592 fill_eproc(struct proc *p, struct eproc *ep)
1593 {
1594 struct tty *tp;
1595
1596 ep->e_paddr = p;
1597 ep->e_sess = p->p_session;
1598 ep->e_pcred = *p->p_cred;
1599 ep->e_ucred = *p->p_ucred;
1600 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1601 ep->e_vm.vm_rssize = 0;
1602 ep->e_vm.vm_tsize = 0;
1603 ep->e_vm.vm_dsize = 0;
1604 ep->e_vm.vm_ssize = 0;
1605 /* ep->e_vm.vm_pmap = XXX; */
1606 } else {
1607 struct vmspace *vm = p->p_vmspace;
1608
1609 ep->e_vm.vm_rssize = vm_resident_count(vm);
1610 ep->e_vm.vm_tsize = vm->vm_tsize;
1611 ep->e_vm.vm_dsize = vm->vm_dsize;
1612 ep->e_vm.vm_ssize = vm->vm_ssize;
1613 }
1614 if (p->p_pptr)
1615 ep->e_ppid = p->p_pptr->p_pid;
1616 else
1617 ep->e_ppid = 0;
1618 ep->e_pgid = p->p_pgrp->pg_id;
1619 ep->e_sid = ep->e_sess->s_sid;
1620 ep->e_jobc = p->p_pgrp->pg_jobc;
1621 if ((p->p_flag & P_CONTROLT) &&
1622 (tp = ep->e_sess->s_ttyp)) {
1623 ep->e_tdev = tp->t_dev;
1624 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1625 ep->e_tsess = tp->t_session;
1626 } else
1627 ep->e_tdev = NODEV;
1628 if (p->p_wmesg)
1629 strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN);
1630 ep->e_xsize = ep->e_xrssize = 0;
1631 ep->e_xccount = ep->e_xswrss = 0;
1632 ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0;
1633 if (SESS_LEADER(p))
1634 ep->e_flag |= EPROC_SLEADER;
1635 strncpy(ep->e_login, ep->e_sess->s_login, MAXLOGNAME);
1636 }
1637
1638 /*
1639  * Fill in a kinfo_proc2 structure for the specified process.
1640 */
1641 static void
1642 fill_kproc2(struct proc *p, struct kinfo_proc2 *ki)
1643 {
1644 struct tty *tp;
1645
1646 memset(ki, 0, sizeof(*ki));
1647
1648 ki->p_forw = PTRTOINT64(p->p_forw);
1649 ki->p_back = PTRTOINT64(p->p_back);
1650 ki->p_paddr = PTRTOINT64(p);
1651
1652 ki->p_addr = PTRTOINT64(p->p_addr);
1653 ki->p_fd = PTRTOINT64(p->p_fd);
1654 ki->p_cwdi = PTRTOINT64(p->p_cwdi);
1655 ki->p_stats = PTRTOINT64(p->p_stats);
1656 ki->p_limit = PTRTOINT64(p->p_limit);
1657 ki->p_vmspace = PTRTOINT64(p->p_vmspace);
1658 ki->p_sigacts = PTRTOINT64(p->p_sigacts);
1659 ki->p_sess = PTRTOINT64(p->p_session);
1660 ki->p_tsess = 0; /* may be changed if controlling tty below */
1661 ki->p_ru = PTRTOINT64(p->p_ru);
1662
1663 ki->p_eflag = 0;
1664 ki->p_exitsig = p->p_exitsig;
1665 ki->p_flag = p->p_flag;
1666
1667 ki->p_pid = p->p_pid;
1668 if (p->p_pptr)
1669 ki->p_ppid = p->p_pptr->p_pid;
1670 else
1671 ki->p_ppid = 0;
1672 ki->p_sid = p->p_session->s_sid;
1673 ki->p__pgid = p->p_pgrp->pg_id;
1674
1675 ki->p_tpgid = NO_PID; /* may be changed if controlling tty below */
1676
1677 ki->p_uid = p->p_ucred->cr_uid;
1678 ki->p_ruid = p->p_cred->p_ruid;
1679 ki->p_gid = p->p_ucred->cr_gid;
1680 ki->p_rgid = p->p_cred->p_rgid;
1681
1682 memcpy(ki->p_groups, p->p_cred->pc_ucred->cr_groups,
1683 min(sizeof(ki->p_groups), sizeof(p->p_cred->pc_ucred->cr_groups)));
1684 ki->p_ngroups = p->p_cred->pc_ucred->cr_ngroups;
1685
1686 ki->p_jobc = p->p_pgrp->pg_jobc;
1687 if ((p->p_flag & P_CONTROLT) && (tp = p->p_session->s_ttyp)) {
1688 ki->p_tdev = tp->t_dev;
1689 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1690 ki->p_tsess = PTRTOINT64(tp->t_session);
1691 } else {
1692 ki->p_tdev = NODEV;
1693 }
1694
1695 ki->p_estcpu = p->p_estcpu;
1696 ki->p_rtime_sec = p->p_rtime.tv_sec;
1697 ki->p_rtime_usec = p->p_rtime.tv_usec;
1698 ki->p_cpticks = p->p_cpticks;
1699 ki->p_pctcpu = p->p_pctcpu;
1700 ki->p_swtime = p->p_swtime;
1701 ki->p_slptime = p->p_slptime;
1702 if (p->p_stat == SONPROC) {
1703 KDASSERT(p->p_cpu != NULL);
1704 ki->p_schedflags = p->p_cpu->ci_schedstate.spc_flags;
1705 } else
1706 ki->p_schedflags = 0;
1707
1708 ki->p_uticks = p->p_uticks;
1709 ki->p_sticks = p->p_sticks;
1710 ki->p_iticks = p->p_iticks;
1711
1712 ki->p_tracep = PTRTOINT64(p->p_tracep);
1713 ki->p_traceflag = p->p_traceflag;
1714
1715 ki->p_holdcnt = p->p_holdcnt;
1716
1717 memcpy(&ki->p_siglist, &p->p_sigctx.ps_siglist, sizeof(ki_sigset_t));
1718 memcpy(&ki->p_sigmask, &p->p_sigctx.ps_sigmask, sizeof(ki_sigset_t));
1719 memcpy(&ki->p_sigignore, &p->p_sigctx.ps_sigignore,sizeof(ki_sigset_t));
1720 memcpy(&ki->p_sigcatch, &p->p_sigctx.ps_sigcatch, sizeof(ki_sigset_t));
1721
1722 ki->p_stat = p->p_stat;
1723 ki->p_priority = p->p_priority;
1724 ki->p_usrpri = p->p_usrpri;
1725 ki->p_nice = p->p_nice;
1726
1727 ki->p_xstat = p->p_xstat;
1728 ki->p_acflag = p->p_acflag;
1729
1730 strncpy(ki->p_comm, p->p_comm,
1731 min(sizeof(ki->p_comm), sizeof(p->p_comm)));
1732
1733 if (p->p_wmesg)
1734 strncpy(ki->p_wmesg, p->p_wmesg, sizeof(ki->p_wmesg));
1735 ki->p_wchan = PTRTOINT64(p->p_wchan);
1736
1737 strncpy(ki->p_login, p->p_session->s_login, sizeof(ki->p_login));
1738
1739 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1740 ki->p_vm_rssize = 0;
1741 ki->p_vm_tsize = 0;
1742 ki->p_vm_dsize = 0;
1743 ki->p_vm_ssize = 0;
1744 } else {
1745 struct vmspace *vm = p->p_vmspace;
1746
1747 ki->p_vm_rssize = vm_resident_count(vm);
1748 ki->p_vm_tsize = vm->vm_tsize;
1749 ki->p_vm_dsize = vm->vm_dsize;
1750 ki->p_vm_ssize = vm->vm_ssize;
1751 }
1752
1753 if (p->p_session->s_ttyvp)
1754 ki->p_eflag |= EPROC_CTTY;
1755 if (SESS_LEADER(p))
1756 ki->p_eflag |= EPROC_SLEADER;
1757
1758 /* XXX Is this double check necessary? */
1759 if ((p->p_flag & P_INMEM) == 0 || P_ZOMBIE(p)) {
1760 ki->p_uvalid = 0;
1761 } else {
1762 ki->p_uvalid = 1;
1763
1764 ki->p_ustart_sec = p->p_stats->p_start.tv_sec;
1765 ki->p_ustart_usec = p->p_stats->p_start.tv_usec;
1766
1767 ki->p_uutime_sec = p->p_stats->p_ru.ru_utime.tv_sec;
1768 ki->p_uutime_usec = p->p_stats->p_ru.ru_utime.tv_usec;
1769 ki->p_ustime_sec = p->p_stats->p_ru.ru_stime.tv_sec;
1770 ki->p_ustime_usec = p->p_stats->p_ru.ru_stime.tv_usec;
1771
1772 ki->p_uru_maxrss = p->p_stats->p_ru.ru_maxrss;
1773 ki->p_uru_ixrss = p->p_stats->p_ru.ru_ixrss;
1774 ki->p_uru_idrss = p->p_stats->p_ru.ru_idrss;
1775 ki->p_uru_isrss = p->p_stats->p_ru.ru_isrss;
1776 ki->p_uru_minflt = p->p_stats->p_ru.ru_minflt;
1777 ki->p_uru_majflt = p->p_stats->p_ru.ru_majflt;
1778 ki->p_uru_nswap = p->p_stats->p_ru.ru_nswap;
1779 ki->p_uru_inblock = p->p_stats->p_ru.ru_inblock;
1780 ki->p_uru_oublock = p->p_stats->p_ru.ru_oublock;
1781 ki->p_uru_msgsnd = p->p_stats->p_ru.ru_msgsnd;
1782 ki->p_uru_msgrcv = p->p_stats->p_ru.ru_msgrcv;
1783 ki->p_uru_nsignals = p->p_stats->p_ru.ru_nsignals;
1784 ki->p_uru_nvcsw = p->p_stats->p_ru.ru_nvcsw;
1785 ki->p_uru_nivcsw = p->p_stats->p_ru.ru_nivcsw;
1786
1787 ki->p_uctime_sec = p->p_stats->p_cru.ru_utime.tv_sec +
1788 p->p_stats->p_cru.ru_stime.tv_sec;
1789 ki->p_uctime_usec = p->p_stats->p_cru.ru_utime.tv_usec +
1790 p->p_stats->p_cru.ru_stime.tv_usec;
1791 }
1792 #ifdef MULTIPROCESSOR
1793 if (p->p_cpu != NULL)
1794 ki->p_cpuid = p->p_cpu->ci_cpuid;
1795 else
1796 #endif
1797 ki->p_cpuid = KI_NOCPU;
1798 }
1799
1800 int
1801 sysctl_procargs(int *name, u_int namelen, void *where, size_t *sizep,
1802 struct proc *up)
1803 {
1804 struct ps_strings pss;
1805 struct proc *p;
1806 size_t len, upper_bound, xlen, i;
1807 struct uio auio;
1808 struct iovec aiov;
1809 vaddr_t argv;
1810 pid_t pid;
1811 int nargv, type, error;
1812 char *arg;
1813 char *tmp;
1814
1815 if (namelen != 2)
1816 return (EINVAL);
1817 pid = name[0];
1818 type = name[1];
1819
1820 switch (type) {
1821 case KERN_PROC_ARGV:
1822 case KERN_PROC_NARGV:
1823 case KERN_PROC_ENV:
1824 case KERN_PROC_NENV:
1825 /* ok */
1826 break;
1827 default:
1828 return (EINVAL);
1829 }
1830
1831 /* check pid */
1832 if ((p = pfind(pid)) == NULL)
1833 return (EINVAL);
1834
1835 	/* only root or the same user can look at the environment */
1836 if (type == KERN_PROC_ENV || type == KERN_PROC_NENV) {
1837 if (up->p_ucred->cr_uid != 0) {
1838 if (up->p_cred->p_ruid != p->p_cred->p_ruid ||
1839 up->p_cred->p_ruid != p->p_cred->p_svuid)
1840 return (EPERM);
1841 }
1842 }
1843
1844 if (sizep != NULL && where == NULL) {
1845 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV)
1846 *sizep = sizeof (int);
1847 else
1848 *sizep = ARG_MAX; /* XXX XXX XXX */
1849 return (0);
1850 }
1851 if (where == NULL || sizep == NULL)
1852 return (EINVAL);
1853
1854 /*
1855 * Zombies don't have a stack, so we can't read their psstrings.
1856 * System processes also don't have a user stack.
1857 */
1858 if (P_ZOMBIE(p) || (p->p_flag & P_SYSTEM) != 0)
1859 return (EINVAL);
1860
1861 /*
1862 * Lock the process down in memory.
1863 */
1864 /* XXXCDC: how should locking work here? */
1865 if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
1866 return (EFAULT);
1867 p->p_vmspace->vm_refcnt++; /* XXX */
1868
1869 /*
1870 * Allocate a temporary buffer to hold the arguments.
1871 */
1872 arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
1873
1874 /*
1875 * Read in the ps_strings structure.
1876 */
1877 aiov.iov_base = &pss;
1878 aiov.iov_len = sizeof(pss);
1879 auio.uio_iov = &aiov;
1880 auio.uio_iovcnt = 1;
1881 auio.uio_offset = (vaddr_t)p->p_psstr;
1882 auio.uio_resid = sizeof(pss);
1883 auio.uio_segflg = UIO_SYSSPACE;
1884 auio.uio_rw = UIO_READ;
1885 auio.uio_procp = NULL;
1886 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1887 if (error)
1888 goto done;
1889
1890 if (type == KERN_PROC_ARGV || type == KERN_PROC_NARGV)
1891 memcpy(&nargv, (char *)&pss + p->p_psnargv, sizeof(nargv));
1892 else
1893 memcpy(&nargv, (char *)&pss + p->p_psnenv, sizeof(nargv));
1894 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV) {
1895 error = copyout(&nargv, where, sizeof(nargv));
1896 *sizep = sizeof(nargv);
1897 goto done;
1898 }
1899 /*
1900 * Now read the address of the argument vector.
1901 */
1902 switch (type) {
1903 case KERN_PROC_ARGV:
1904 /* XXX compat32 stuff here */
1905 memcpy(&tmp, (char *)&pss + p->p_psargv, sizeof(tmp));
1906 break;
1907 case KERN_PROC_ENV:
1908 memcpy(&tmp, (char *)&pss + p->p_psenv, sizeof(tmp));
1909 break;
1910 	default:
1911 		error = EINVAL; goto done;	/* free(arg), drop vmspace ref */
1912 }
1913 auio.uio_offset = (off_t)(long)tmp;
1914 aiov.iov_base = &argv;
1915 aiov.iov_len = sizeof(argv);
1916 auio.uio_iov = &aiov;
1917 auio.uio_iovcnt = 1;
1918 auio.uio_resid = sizeof(argv);
1919 auio.uio_segflg = UIO_SYSSPACE;
1920 auio.uio_rw = UIO_READ;
1921 auio.uio_procp = NULL;
1922 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1923 if (error)
1924 goto done;
1925
1926 /*
1927 * Now copy in the actual argument vector, one page at a time,
1928 * since we don't know how long the vector is (though, we do
1929 * know how many NUL-terminated strings are in the vector).
1930 */
1931 len = 0;
1932 upper_bound = *sizep;
1933 for (; nargv != 0 && len < upper_bound; len += xlen) {
1934 aiov.iov_base = arg;
1935 aiov.iov_len = PAGE_SIZE;
1936 auio.uio_iov = &aiov;
1937 auio.uio_iovcnt = 1;
1938 auio.uio_offset = argv + len;
1939 xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
1940 auio.uio_resid = xlen;
1941 auio.uio_segflg = UIO_SYSSPACE;
1942 auio.uio_rw = UIO_READ;
1943 auio.uio_procp = NULL;
1944 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1945 if (error)
1946 goto done;
1947
1948 for (i = 0; i < xlen && nargv != 0; i++) {
1949 if (arg[i] == '\0')
1950 nargv--; /* one full string */
1951 }
1952
1953 /*
1954 * Make sure we don't copyout past the end of the user's
1955 * buffer.
1956 */
1957 if (len + i > upper_bound)
1958 i = upper_bound - len;
1959
1960 error = copyout(arg, (char *)where + len, i);
1961 if (error)
1962 break;
1963
1964 if (nargv == 0) {
1965 len += i;
1966 break;
1967 }
1968 }
1969 *sizep = len;
1970
1971 done:
1972 uvmspace_free(p->p_vmspace);
1973
1974 free(arg, M_TEMP);
1975 return (error);
1976 }
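
/*
 * Illustrative sketch: the argument/environment nodes handled above are
 * addressed as { CTL_KERN, KERN_PROC_ARGS, pid, op }, e.g. to fetch the
 * argument strings of a process:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC_ARGS, pid, KERN_PROC_ARGV };
 *	size_t len = ARG_MAX;
 *	char *args = malloc(len);
 *
 *	(void)sysctl(mib, 4, args, &len, NULL, 0);
 *
 * The strings come back NUL-separated; KERN_PROC_NARGV returns just the
 * count as an int.
 */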
1977
1978 #if NPTY > 0
1979 int pty_maxptys(int, int); /* defined in kern/tty_pty.c */
1980
1981 /*
1982 * Validate parameters and get old / set new parameters
1983 * for pty sysctl function.
1984 */
1985 static int
1986 sysctl_pty(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
1987 {
1988 int error = 0;
1989 int oldmax = 0, newmax = 0;
1990
1991 /* get current value of maxptys */
1992 oldmax = pty_maxptys(0, 0);
1993
1994 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &oldmax, int)
1995
1996 if (!error && newp) {
1997 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
1998 SYSCTL_SCALAR_NEWPCOP_TYP(newp, &newmax, int)
1999
2000 if (newmax != pty_maxptys(newmax, (newp != NULL)))
2001 return (EINVAL);
2002
2003 }
2004
2005 return (error);
2006 }
2007 #endif /* NPTY > 0 */
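
/*
 * Illustrative sketch: KERN_MAXPTYS is a read/write node; the new maximum
 * is validated by pty_maxptys() before it takes effect, so an out-of-range
 * write fails with EINVAL:
 *
 *	int mib[2] = { CTL_KERN, KERN_MAXPTYS };
 *	int newmax = 256;
 *
 *	(void)sysctl(mib, 2, NULL, NULL, &newmax, sizeof(newmax));
 */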
2008
2009 static int
2010 sysctl_dotkstat(name, namelen, where, sizep, newp)
2011 int *name;
2012 u_int namelen;
2013 void *where;
2014 size_t *sizep;
2015 void *newp;
2016 {
2017 /* all sysctl names at this level are terminal */
2018 if (namelen != 1)
2019 return (ENOTDIR); /* overloaded */
2020
2021 switch (name[0]) {
2022 case KERN_TKSTAT_NIN:
2023 return (sysctl_rdquad(where, sizep, newp, tk_nin));
2024 case KERN_TKSTAT_NOUT:
2025 return (sysctl_rdquad(where, sizep, newp, tk_nout));
2026 case KERN_TKSTAT_CANCC:
2027 return (sysctl_rdquad(where, sizep, newp, tk_cancc));
2028 case KERN_TKSTAT_RAWCC:
2029 return (sysctl_rdquad(where, sizep, newp, tk_rawcc));
2030 default:
2031 return (EOPNOTSUPP);
2032 }
2033 }
2034