1 /* $NetBSD: kern_sysctl.c,v 1.86.2.21 2002/08/01 02:46:22 nathanw Exp $ */
2
3 /*-
4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Mike Karels at Berkeley Software Design, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95
39 */
40
41 /*
42 * sysctl system call.
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: kern_sysctl.c,v 1.86.2.21 2002/08/01 02:46:22 nathanw Exp $");
47
48 #include "opt_ddb.h"
49 #include "opt_insecure.h"
50 #include "opt_defcorename.h"
51 #include "opt_pipe.h"
52 #include "opt_sysv.h"
53 #include "pty.h"
54 #include "rnd.h"
55
56 #include <sys/param.h>
57 #include <sys/systm.h>
58 #include <sys/kernel.h>
59 #include <sys/buf.h>
60 #include <sys/device.h>
61 #include <sys/disklabel.h>
62 #include <sys/dkstat.h>
63 #include <sys/exec.h>
64 #include <sys/file.h>
65 #include <sys/ioctl.h>
66 #include <sys/malloc.h>
67 #include <sys/mount.h>
68 #include <sys/msgbuf.h>
69 #include <sys/pool.h>
70 #include <sys/proc.h>
71 #include <sys/resource.h>
72 #include <sys/resourcevar.h>
73 #include <sys/sa.h>
74 #include <sys/syscallargs.h>
75 #include <sys/tty.h>
76 #include <sys/unistd.h>
77 #include <sys/vnode.h>
78 #include <sys/socketvar.h>
79 #define __SYSCTL_PRIVATE
80 #include <sys/sysctl.h>
81 #include <sys/lock.h>
82 #include <sys/namei.h>
83
84 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
85 #include <sys/ipc.h>
86 #endif
87 #ifdef SYSVMSG
88 #include <sys/msg.h>
89 #endif
90 #ifdef SYSVSEM
91 #include <sys/sem.h>
92 #endif
93 #ifdef SYSVSHM
94 #include <sys/shm.h>
95 #endif
96
97 #include <dev/cons.h>
98
99 #if defined(DDB)
100 #include <ddb/ddbvar.h>
101 #endif
102
103 #ifndef PIPE_SOCKETPAIR
104 #include <sys/pipe.h>
105 #endif
106
107 #if NRND > 0
108 #include <sys/rnd.h>
109 #endif
110
111 #define PTRTOINT64(foo) ((u_int64_t)(uintptr_t)(foo))
112
113 static int sysctl_file(void *, size_t *);
114 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
115 static int sysctl_sysvipc(int *, u_int, void *, size_t *);
116 #endif
117 static int sysctl_msgbuf(void *, size_t *);
118 static int sysctl_doeproc(int *, u_int, void *, size_t *);
119 static int sysctl_dolwp(int *, u_int, void *, size_t *);
120 static int sysctl_dotkstat(int *, u_int, void *, size_t *, void *);
121 #ifdef MULTIPROCESSOR
122 static int sysctl_docptime(void *, size_t *, void *);
123 static int sysctl_ncpus(void);
124 #endif
125 static void fill_kproc2(struct proc *, struct kinfo_proc2 *);
126 static void fill_lwp(struct lwp *, struct kinfo_lwp *);
127 static int sysctl_procargs(int *, u_int, void *, size_t *, struct proc *);
128 #if NPTY > 0
129 static int sysctl_pty(void *, size_t *, void *, size_t);
130 #endif
131
132 /*
133 * The `sysctl_memlock' is intended to keep too many processes from
134 * locking down memory by doing sysctls at once. Whether or not this
135  * is really a good idea to worry about is probably a subject of some
136 * debate.
137 */
138 struct lock sysctl_memlock;
139
140 void
141 sysctl_init(void)
142 {
143
144 lockinit(&sysctl_memlock, PRIBIO|PCATCH, "sysctl", 0, 0);
145 }
146
147 int
148 sys___sysctl(struct lwp *l, void *v, register_t *retval)
149 {
150 struct sys___sysctl_args /* {
151 syscallarg(int *) name;
152 syscallarg(u_int) namelen;
153 syscallarg(void *) old;
154 syscallarg(size_t *) oldlenp;
155 syscallarg(void *) new;
156 syscallarg(size_t) newlen;
157 } */ *uap = v;
158 struct proc *p = l->l_proc;
159 int error;
160 size_t savelen = 0, oldlen = 0;
161 sysctlfn *fn;
162 int name[CTL_MAXNAME];
163 size_t *oldlenp;
164
165 /*
166 * all top-level sysctl names are non-terminal
167 */
168 if (SCARG(uap, namelen) > CTL_MAXNAME || SCARG(uap, namelen) < 2)
169 return (EINVAL);
170 error = copyin(SCARG(uap, name), &name,
171 SCARG(uap, namelen) * sizeof(int));
172 if (error)
173 return (error);
174
175 /*
176 * For all but CTL_PROC, must be root to change a value.
177 	 * For CTL_PROC, must be root, or owner of the proc (and not suid);
178 	 * this is checked in proc_sysctl() (once we know the target proc).
179 */
180 if (SCARG(uap, new) != NULL && name[0] != CTL_PROC &&
181 (error = suser(p->p_ucred, &p->p_acflag)))
182 return error;
183
184 switch (name[0]) {
185 case CTL_KERN:
186 fn = kern_sysctl;
187 break;
188 case CTL_HW:
189 fn = hw_sysctl;
190 break;
191 case CTL_VM:
192 fn = uvm_sysctl;
193 break;
194 case CTL_NET:
195 fn = net_sysctl;
196 break;
197 case CTL_VFS:
198 fn = vfs_sysctl;
199 break;
200 case CTL_MACHDEP:
201 fn = cpu_sysctl;
202 break;
203 #ifdef DEBUG
204 case CTL_DEBUG:
205 fn = debug_sysctl;
206 break;
207 #endif
208 #ifdef DDB
209 case CTL_DDB:
210 fn = ddb_sysctl;
211 break;
212 #endif
213 case CTL_PROC:
214 fn = proc_sysctl;
215 break;
216
217 case CTL_EMUL:
218 fn = emul_sysctl;
219 break;
220 default:
221 return (EOPNOTSUPP);
222 }
223
224 /*
225 * XXX Hey, we wire `old', but what about `new'?
226 */
227
228 oldlenp = SCARG(uap, oldlenp);
229 if (oldlenp) {
230 if ((error = copyin(oldlenp, &oldlen, sizeof(oldlen))))
231 return (error);
232 oldlenp = &oldlen;
233 }
234 if (SCARG(uap, old) != NULL) {
235 error = lockmgr(&sysctl_memlock, LK_EXCLUSIVE, NULL);
236 if (error)
237 return (error);
238 error = uvm_vslock(p, SCARG(uap, old), oldlen, VM_PROT_WRITE);
239 if (error) {
240 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
241 return error;
242 }
243 savelen = oldlen;
244 }
245 error = (*fn)(name + 1, SCARG(uap, namelen) - 1, SCARG(uap, old),
246 oldlenp, SCARG(uap, new), SCARG(uap, newlen), p);
247 if (SCARG(uap, old) != NULL) {
248 uvm_vsunlock(p, SCARG(uap, old), savelen);
249 (void) lockmgr(&sysctl_memlock, LK_RELEASE, NULL);
250 }
251 if (error)
252 return (error);
253 if (SCARG(uap, oldlenp))
254 error = copyout(&oldlen, SCARG(uap, oldlenp), sizeof(oldlen));
255 return (error);
256 }
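
/*
 * Illustrative userland sketch (not part of this file): the calling
 * convention that sys___sysctl() implements, seen through sysctl(3).
 * The caller supplies a name vector of at least two integers, reads the
 * current value through oldp/oldlenp and, with sufficient privilege,
 * writes a new one through newp/newlen.  A minimal read-only example:
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_MAXPROC };
 *		int maxproc;
 *		size_t len = sizeof(maxproc);
 *
 *		if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.maxproc");
 *		printf("kern.maxproc = %d\n", maxproc);
 *		return 0;
 *	}
 */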
257
258 /*
259 * Attributes stored in the kernel.
260 */
261 char hostname[MAXHOSTNAMELEN];
262 int hostnamelen;
263
264 char domainname[MAXHOSTNAMELEN];
265 int domainnamelen;
266
267 long hostid;
268
269 #ifdef INSECURE
270 int securelevel = -1;
271 #else
272 int securelevel = 0;
273 #endif
274
275 #ifndef DEFCORENAME
276 #define DEFCORENAME "%n.core"
277 #endif
278 char defcorename[MAXPATHLEN] = DEFCORENAME;
279 int defcorenamelen = sizeof(DEFCORENAME);
280
281 extern int kern_logsigexit;
282 extern fixpt_t ccpu;
283
284 #ifndef MULTIPROCESSOR
285 #define sysctl_ncpus() 1
286 #endif
287
288 #ifdef MULTIPROCESSOR
289
290 #ifndef CPU_INFO_FOREACH
291 #define CPU_INFO_ITERATOR int
292 #define CPU_INFO_FOREACH(cii, ci) cii = 0, ci = curcpu(); ci != NULL; ci = NULL
293 #endif
294
295 static int
296 sysctl_docptime(void *oldp, size_t *oldlenp, void *newp)
297 {
298 u_int64_t cp_time[CPUSTATES];
299 int i;
300 struct cpu_info *ci;
301 CPU_INFO_ITERATOR cii;
302
303 for (i=0; i<CPUSTATES; i++)
304 cp_time[i] = 0;
305
306 for (CPU_INFO_FOREACH(cii, ci)) {
307 for (i=0; i<CPUSTATES; i++)
308 cp_time[i] += ci->ci_schedstate.spc_cp_time[i];
309 }
310 return (sysctl_rdstruct(oldp, oldlenp, newp,
311 cp_time, sizeof(cp_time)));
312 }
313
314 static int
315 sysctl_ncpus(void)
316 {
317 struct cpu_info *ci;
318 CPU_INFO_ITERATOR cii;
319
320 int ncpus = 0;
321 for (CPU_INFO_FOREACH(cii, ci))
322 ncpus++;
323 return ncpus;
324 }
325
326 #endif
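
/*
 * Illustrative userland sketch (not part of this file): reading the
 * KERN_CP_TIME array that sysctl_docptime() (or the uniprocessor case
 * in kern_sysctl() below) returns.  The five counters are user, nice,
 * system, interrupt and idle ticks; on MULTIPROCESSOR kernels they are
 * summed over all CPUs.  CPUSTATES is assumed to be visible to userland
 * via <sys/sched.h>; substitute the constant 5 if it is not.
 *
 *	#include <sys/param.h>
 *	#include <sys/sched.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_CP_TIME };
 *		u_int64_t cp_time[CPUSTATES];
 *		size_t len = sizeof(cp_time);
 *		int i;
 *
 *		if (sysctl(mib, 2, cp_time, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.cp_time");
 *		for (i = 0; i < CPUSTATES; i++)
 *			printf("state %d: %llu ticks\n", i,
 *			    (unsigned long long)cp_time[i]);
 *		return 0;
 *	}
 */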
327
328 /*
329 * kernel related system variables.
330 */
331 int
332 kern_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
333 void *newp, size_t newlen, struct proc *p)
334 {
335 int error, level, inthostid;
336 int old_autonicetime;
337 int old_vnodes;
338 dev_t consdev;
339 #if NRND > 0
340 int v;
341 #endif
342
343 /* All sysctl names at this level, except for a few, are terminal. */
344 switch (name[0]) {
345 case KERN_PROC:
346 case KERN_PROC2:
347 case KERN_LWP:
348 case KERN_PROF:
349 case KERN_MBUF:
350 case KERN_PROC_ARGS:
351 case KERN_SYSVIPC_INFO:
352 case KERN_PIPE:
353 case KERN_TKSTAT:
354 /* Not terminal. */
355 break;
356 default:
357 if (namelen != 1)
358 return (ENOTDIR); /* overloaded */
359 }
360
361 switch (name[0]) {
362 case KERN_OSTYPE:
363 return (sysctl_rdstring(oldp, oldlenp, newp, ostype));
364 case KERN_OSRELEASE:
365 return (sysctl_rdstring(oldp, oldlenp, newp, osrelease));
366 case KERN_OSREV:
367 return (sysctl_rdint(oldp, oldlenp, newp, __NetBSD_Version__));
368 case KERN_VERSION:
369 return (sysctl_rdstring(oldp, oldlenp, newp, version));
370 case KERN_MAXVNODES:
371 old_vnodes = desiredvnodes;
372 error = sysctl_int(oldp, oldlenp, newp, newlen, &desiredvnodes);
373 if (newp && !error) {
374 if (old_vnodes > desiredvnodes) {
375 desiredvnodes = old_vnodes;
376 return (EINVAL);
377 }
378 vfs_reinit();
379 nchreinit();
380 }
381 return (error);
382 case KERN_MAXPROC:
383 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc));
384 case KERN_MAXFILES:
385 return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles));
386 case KERN_ARGMAX:
387 return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX));
388 case KERN_SECURELVL:
389 level = securelevel;
390 if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) ||
391 newp == NULL)
392 return (error);
393 if (level < securelevel && p->p_pid != 1)
394 return (EPERM);
395 securelevel = level;
396 return (0);
397 case KERN_HOSTNAME:
398 error = sysctl_string(oldp, oldlenp, newp, newlen,
399 hostname, sizeof(hostname));
400 if (newp && !error)
401 hostnamelen = newlen;
402 return (error);
403 case KERN_DOMAINNAME:
404 error = sysctl_string(oldp, oldlenp, newp, newlen,
405 domainname, sizeof(domainname));
406 if (newp && !error)
407 domainnamelen = newlen;
408 return (error);
409 case KERN_HOSTID:
410 inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */
411 error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid);
412 if (newp && !error)
413 hostid = inthostid;
414 return (error);
415 case KERN_CLOCKRATE:
416 return (sysctl_clockrate(oldp, oldlenp));
417 case KERN_BOOTTIME:
418 return (sysctl_rdstruct(oldp, oldlenp, newp, &boottime,
419 sizeof(struct timeval)));
420 case KERN_VNODE:
421 return (sysctl_vnode(oldp, oldlenp, p));
422 case KERN_PROC:
423 case KERN_PROC2:
424 return (sysctl_doeproc(name, namelen, oldp, oldlenp));
425 case KERN_LWP:
426 return (sysctl_dolwp(name, namelen, oldp, oldlenp));
427 case KERN_PROC_ARGS:
428 return (sysctl_procargs(name + 1, namelen - 1,
429 oldp, oldlenp, p));
430 case KERN_FILE:
431 return (sysctl_file(oldp, oldlenp));
432 #ifdef GPROF
433 case KERN_PROF:
434 return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp,
435 newp, newlen));
436 #endif
437 case KERN_POSIX1:
438 return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION));
439 case KERN_NGROUPS:
440 return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX));
441 case KERN_JOB_CONTROL:
442 return (sysctl_rdint(oldp, oldlenp, newp, 1));
443 case KERN_SAVED_IDS:
444 #ifdef _POSIX_SAVED_IDS
445 return (sysctl_rdint(oldp, oldlenp, newp, 1));
446 #else
447 return (sysctl_rdint(oldp, oldlenp, newp, 0));
448 #endif
449 case KERN_MAXPARTITIONS:
450 return (sysctl_rdint(oldp, oldlenp, newp, MAXPARTITIONS));
451 case KERN_RAWPARTITION:
452 return (sysctl_rdint(oldp, oldlenp, newp, RAW_PART));
453 #ifdef NTP
454 case KERN_NTPTIME:
455 return (sysctl_ntptime(oldp, oldlenp));
456 #endif
457 case KERN_AUTONICETIME:
458 old_autonicetime = autonicetime;
459 error = sysctl_int(oldp, oldlenp, newp, newlen, &autonicetime);
460 if (autonicetime < 0)
461 autonicetime = old_autonicetime;
462 return (error);
463 case KERN_AUTONICEVAL:
464 error = sysctl_int(oldp, oldlenp, newp, newlen, &autoniceval);
465 if (autoniceval < PRIO_MIN)
466 autoniceval = PRIO_MIN;
467 if (autoniceval > PRIO_MAX)
468 autoniceval = PRIO_MAX;
469 return (error);
470 case KERN_RTC_OFFSET:
471 return (sysctl_rdint(oldp, oldlenp, newp, rtc_offset));
472 case KERN_ROOT_DEVICE:
473 return (sysctl_rdstring(oldp, oldlenp, newp,
474 root_device->dv_xname));
475 case KERN_MSGBUFSIZE:
476 /*
477 * deal with cases where the message buffer has
478 * become corrupted.
479 */
480 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
481 msgbufenabled = 0;
482 return (ENXIO);
483 }
484 return (sysctl_rdint(oldp, oldlenp, newp, msgbufp->msg_bufs));
485 case KERN_FSYNC:
486 return (sysctl_rdint(oldp, oldlenp, newp, 1));
487 case KERN_SYSVMSG:
488 #ifdef SYSVMSG
489 return (sysctl_rdint(oldp, oldlenp, newp, 1));
490 #else
491 return (sysctl_rdint(oldp, oldlenp, newp, 0));
492 #endif
493 case KERN_SYSVSEM:
494 #ifdef SYSVSEM
495 return (sysctl_rdint(oldp, oldlenp, newp, 1));
496 #else
497 return (sysctl_rdint(oldp, oldlenp, newp, 0));
498 #endif
499 case KERN_SYSVSHM:
500 #ifdef SYSVSHM
501 return (sysctl_rdint(oldp, oldlenp, newp, 1));
502 #else
503 return (sysctl_rdint(oldp, oldlenp, newp, 0));
504 #endif
505 case KERN_DEFCORENAME:
506 if (newp && newlen < 1)
507 return (EINVAL);
508 error = sysctl_string(oldp, oldlenp, newp, newlen,
509 defcorename, sizeof(defcorename));
510 if (newp && !error)
511 defcorenamelen = newlen;
512 return (error);
513 case KERN_SYNCHRONIZED_IO:
514 return (sysctl_rdint(oldp, oldlenp, newp, 1));
515 case KERN_IOV_MAX:
516 return (sysctl_rdint(oldp, oldlenp, newp, IOV_MAX));
517 case KERN_MBUF:
518 return (sysctl_dombuf(name + 1, namelen - 1, oldp, oldlenp,
519 newp, newlen));
520 case KERN_MAPPED_FILES:
521 return (sysctl_rdint(oldp, oldlenp, newp, 1));
522 case KERN_MEMLOCK:
523 return (sysctl_rdint(oldp, oldlenp, newp, 1));
524 case KERN_MEMLOCK_RANGE:
525 return (sysctl_rdint(oldp, oldlenp, newp, 1));
526 case KERN_MEMORY_PROTECTION:
527 return (sysctl_rdint(oldp, oldlenp, newp, 1));
528 case KERN_LOGIN_NAME_MAX:
529 return (sysctl_rdint(oldp, oldlenp, newp, LOGIN_NAME_MAX));
530 case KERN_LOGSIGEXIT:
531 return (sysctl_int(oldp, oldlenp, newp, newlen,
532 &kern_logsigexit));
533 case KERN_FSCALE:
534 return (sysctl_rdint(oldp, oldlenp, newp, FSCALE));
535 case KERN_CCPU:
536 return (sysctl_rdint(oldp, oldlenp, newp, ccpu));
537 case KERN_CP_TIME:
538 #ifndef MULTIPROCESSOR
539 return (sysctl_rdstruct(oldp, oldlenp, newp,
540 curcpu()->ci_schedstate.spc_cp_time,
541 sizeof(curcpu()->ci_schedstate.spc_cp_time)));
542 #else
543 return (sysctl_docptime(oldp, oldlenp, newp));
544 #endif
545 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
546 case KERN_SYSVIPC_INFO:
547 return (sysctl_sysvipc(name + 1, namelen - 1, oldp, oldlenp));
548 #endif
549 case KERN_MSGBUF:
550 return (sysctl_msgbuf(oldp, oldlenp));
551 case KERN_CONSDEV:
552 if (cn_tab != NULL)
553 consdev = cn_tab->cn_dev;
554 else
555 consdev = NODEV;
556 return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
557 sizeof consdev));
558 #if NPTY > 0
559 case KERN_MAXPTYS:
560 return sysctl_pty(oldp, oldlenp, newp, newlen);
561 #endif
562 #ifndef PIPE_SOCKETPAIR
563 case KERN_PIPE:
564 return (sysctl_dopipe(name + 1, namelen - 1, oldp, oldlenp,
565 newp, newlen));
566 #endif
567 case KERN_MAXPHYS:
568 return (sysctl_rdint(oldp, oldlenp, newp, MAXPHYS));
569 case KERN_SBMAX:
570 {
571 int new_sbmax = sb_max;
572
573 error = sysctl_int(oldp, oldlenp, newp, newlen, &new_sbmax);
574 if (newp && !error) {
575 if (new_sbmax < (16 * 1024)) /* sanity */
576 return (EINVAL);
577 sb_max = new_sbmax;
578 }
579 return (error);
580 }
581 case KERN_TKSTAT:
582 return (sysctl_dotkstat(name + 1, namelen - 1, oldp, oldlenp,
583 newp));
584 case KERN_MONOTONIC_CLOCK: /* XXX _POSIX_VERSION */
585 return (sysctl_rdint(oldp, oldlenp, newp, 200112));
586 case KERN_URND:
587 #if NRND > 0
588 if (rnd_extract_data(&v, sizeof(v), RND_EXTRACT_ANY) ==
589 sizeof(v))
590 return (sysctl_rdint(oldp, oldlenp, newp, v));
591 else
592 return (EIO); /*XXX*/
593 #else
594 return (EOPNOTSUPP);
595 #endif
596 default:
597 return (EOPNOTSUPP);
598 }
599 /* NOTREACHED */
600 }
601
602 /*
603 * hardware related system variables.
604 */
605 int
606 hw_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
607 void *newp, size_t newlen, struct proc *p)
608 {
609
610 /* All sysctl names at this level, except for a few, are terminal. */
611 switch (name[0]) {
612 case HW_DISKSTATS:
613 /* Not terminal. */
614 break;
615 default:
616 if (namelen != 1)
617 return (ENOTDIR); /* overloaded */
618 }
619
620 switch (name[0]) {
621 case HW_MACHINE:
622 return (sysctl_rdstring(oldp, oldlenp, newp, machine));
623 case HW_MACHINE_ARCH:
624 return (sysctl_rdstring(oldp, oldlenp, newp, machine_arch));
625 case HW_MODEL:
626 return (sysctl_rdstring(oldp, oldlenp, newp, cpu_model));
627 case HW_NCPU:
628 return (sysctl_rdint(oldp, oldlenp, newp, sysctl_ncpus()));
629 case HW_BYTEORDER:
630 return (sysctl_rdint(oldp, oldlenp, newp, BYTE_ORDER));
631 case HW_PHYSMEM:
632 return (sysctl_rdint(oldp, oldlenp, newp, ctob(physmem)));
633 case HW_USERMEM:
634 return (sysctl_rdint(oldp, oldlenp, newp,
635 ctob(physmem - uvmexp.wired)));
636 case HW_PAGESIZE:
637 return (sysctl_rdint(oldp, oldlenp, newp, PAGE_SIZE));
638 case HW_ALIGNBYTES:
639 return (sysctl_rdint(oldp, oldlenp, newp, ALIGNBYTES));
640 case HW_DISKNAMES:
641 return (sysctl_disknames(oldp, oldlenp));
642 case HW_DISKSTATS:
643 return (sysctl_diskstats(name + 1, namelen - 1, oldp, oldlenp));
644 case HW_CNMAGIC: {
645 char magic[CNS_LEN];
646 int error;
647
648 if (oldp)
649 cn_get_magic(magic, CNS_LEN);
650 error = sysctl_string(oldp, oldlenp, newp, newlen,
651 magic, sizeof(magic));
652 if (newp && !error) {
653 error = cn_set_magic(magic);
654 }
655 return (error);
656 }
657 default:
658 return (EOPNOTSUPP);
659 }
660 /* NOTREACHED */
661 }
662
663 #ifdef DEBUG
664 /*
665 * Debugging related system variables.
666 */
667 struct ctldebug /* debug0, */ /* debug1, */ debug2, debug3, debug4;
668 struct ctldebug debug5, debug6, debug7, debug8, debug9;
669 struct ctldebug debug10, debug11, debug12, debug13, debug14;
670 struct ctldebug debug15, debug16, debug17, debug18, debug19;
671 static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = {
672 &debug0, &debug1, &debug2, &debug3, &debug4,
673 &debug5, &debug6, &debug7, &debug8, &debug9,
674 &debug10, &debug11, &debug12, &debug13, &debug14,
675 &debug15, &debug16, &debug17, &debug18, &debug19,
676 };
677
678 int
679 debug_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
680 void *newp, size_t newlen, struct proc *p)
681 {
682 struct ctldebug *cdp;
683
684 /* all sysctl names at this level are name and field */
685 if (namelen != 2)
686 return (ENOTDIR); /* overloaded */
687 if (name[0] >= CTL_DEBUG_MAXID)
688 return (EOPNOTSUPP);
689 cdp = debugvars[name[0]];
690 if (cdp->debugname == 0)
691 return (EOPNOTSUPP);
692 switch (name[1]) {
693 case CTL_DEBUG_NAME:
694 return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname));
695 case CTL_DEBUG_VALUE:
696 return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar));
697 default:
698 return (EOPNOTSUPP);
699 }
700 /* NOTREACHED */
701 }
702 #endif /* DEBUG */
703
704 int
705 proc_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
706 void *newp, size_t newlen, struct proc *p)
707 {
708 struct proc *ptmp = NULL;
709 const struct proclist_desc *pd;
710 int error = 0;
711 struct rlimit alim;
712 struct plimit *newplim;
713 char *tmps = NULL;
714 int i, curlen, len;
715
716 if (namelen < 2)
717 return EINVAL;
718
719 if (name[0] == PROC_CURPROC) {
720 ptmp = p;
721 } else {
722 proclist_lock_read();
723 for (pd = proclists; pd->pd_list != NULL; pd++) {
724 for (ptmp = LIST_FIRST(pd->pd_list); ptmp != NULL;
725 ptmp = LIST_NEXT(ptmp, p_list)) {
726 /* Skip embryonic processes. */
727 if (ptmp->p_stat == SIDL)
728 continue;
729 if (ptmp->p_pid == (pid_t)name[0])
730 break;
731 }
732 if (ptmp != NULL)
733 break;
734 }
735 proclist_unlock_read();
736 if (ptmp == NULL)
737 return(ESRCH);
738 if (p->p_ucred->cr_uid != 0) {
739 			if (p->p_cred->p_ruid != ptmp->p_cred->p_ruid ||
740 p->p_cred->p_ruid != ptmp->p_cred->p_svuid)
741 return EPERM;
742 if (ptmp->p_cred->p_rgid != ptmp->p_cred->p_svgid)
743 return EPERM; /* sgid proc */
744 for (i = 0; i < p->p_ucred->cr_ngroups; i++) {
745 if (p->p_ucred->cr_groups[i] ==
746 ptmp->p_cred->p_rgid)
747 break;
748 }
749 if (i == p->p_ucred->cr_ngroups)
750 return EPERM;
751 }
752 }
753 if (name[1] == PROC_PID_CORENAME) {
754 if (namelen != 2)
755 return EINVAL;
756 /*
757 * Can't use sysctl_string() here because we may malloc a new
758 * area during the process, so we have to do it by hand.
759 */
760 curlen = strlen(ptmp->p_limit->pl_corename) + 1;
761 if (oldlenp && *oldlenp < curlen) {
762 if (!oldp)
763 *oldlenp = curlen;
764 return (ENOMEM);
765 }
766 if (newp) {
767 if (securelevel > 2)
768 return EPERM;
769 if (newlen > MAXPATHLEN)
770 return ENAMETOOLONG;
771 tmps = malloc(newlen + 1, M_TEMP, M_WAITOK);
772 if (tmps == NULL)
773 return ENOMEM;
774 error = copyin(newp, tmps, newlen + 1);
775 tmps[newlen] = '\0';
776 if (error)
777 goto cleanup;
778 			/* Enforce the name to be either 'core' or to end with '.core' */
779 if (newlen < 4) { /* c.o.r.e */
780 error = EINVAL;
781 goto cleanup;
782 }
783 len = newlen - 4;
784 if (len > 0) {
785 if (tmps[len - 1] != '.' &&
786 tmps[len - 1] != '/') {
787 error = EINVAL;
788 goto cleanup;
789 }
790 }
791 if (strcmp(&tmps[len], "core") != 0) {
792 error = EINVAL;
793 goto cleanup;
794 }
795 }
796 if (oldp && oldlenp) {
797 *oldlenp = curlen;
798 error = copyout(ptmp->p_limit->pl_corename, oldp,
799 curlen);
800 }
801 if (newp && error == 0) {
802 /* if the 2 strings are identical, don't limcopy() */
803 if (strcmp(tmps, ptmp->p_limit->pl_corename) == 0) {
804 error = 0;
805 goto cleanup;
806 }
807 if (ptmp->p_limit->p_refcnt > 1 &&
808 (ptmp->p_limit->p_lflags & PL_SHAREMOD) == 0) {
809 newplim = limcopy(ptmp->p_limit);
810 limfree(ptmp->p_limit);
811 ptmp->p_limit = newplim;
812 }
813 if (ptmp->p_limit->pl_corename != defcorename) {
814 free(ptmp->p_limit->pl_corename, M_TEMP);
815 }
816 ptmp->p_limit->pl_corename = tmps;
817 return (0);
818 }
819 cleanup:
820 if (tmps)
821 free(tmps, M_TEMP);
822 return (error);
823 }
824 if (name[1] == PROC_PID_LIMIT) {
825 if (namelen != 4 || name[2] >= PROC_PID_LIMIT_MAXID)
826 return EINVAL;
827 memcpy(&alim, &ptmp->p_rlimit[name[2] - 1], sizeof(alim));
828 if (name[3] == PROC_PID_LIMIT_TYPE_HARD)
829 error = sysctl_quad(oldp, oldlenp, newp, newlen,
830 &alim.rlim_max);
831 else if (name[3] == PROC_PID_LIMIT_TYPE_SOFT)
832 error = sysctl_quad(oldp, oldlenp, newp, newlen,
833 &alim.rlim_cur);
834 else
835 error = EINVAL;
836
837 if (error)
838 return error;
839
840 if (newp)
841 error = dosetrlimit(ptmp, p->p_cred,
842 name[2] - 1, &alim);
843 return error;
844 }
845 return (EINVAL);
846 }
847
848 int
849 emul_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
850 void *newp, size_t newlen, struct proc *p)
851 {
852 static struct {
853 const char *name;
854 int type;
855 } emulations[] = CTL_EMUL_NAMES;
856 const struct emul *e;
857 const char *ename;
858 #ifdef LKM
859 extern struct lock exec_lock; /* XXX */
860 int error;
861 #else
862 extern int nexecs_builtin;
863 extern const struct execsw execsw_builtin[];
864 int i;
865 #endif
866
867 /* all sysctl names at this level are name and field */
868 if (namelen < 2)
869 return (ENOTDIR); /* overloaded */
870
871 if ((u_int) name[0] >= EMUL_MAXID || name[0] == 0)
872 return (EOPNOTSUPP);
873
874 ename = emulations[name[0]].name;
875
876 #ifdef LKM
877 lockmgr(&exec_lock, LK_SHARED, NULL);
878 if ((e = emul_search(ename))) {
879 error = (*e->e_sysctl)(name + 1, namelen - 1, oldp, oldlenp,
880 newp, newlen, p);
881 } else
882 error = EOPNOTSUPP;
883 lockmgr(&exec_lock, LK_RELEASE, NULL);
884
885 return (error);
886 #else
887 for (i = 0; i < nexecs_builtin; i++) {
888 e = execsw_builtin[i].es_emul;
889 if (e == NULL || strcmp(ename, e->e_name) != 0 ||
890 		    e->e_sysctl == NULL)
891 continue;
892
893 return (*e->e_sysctl)(name + 1, namelen - 1, oldp, oldlenp,
894 newp, newlen, p);
895 }
896
897 return (EOPNOTSUPP);
898 #endif
899 }
900 /*
901 * Convenience macros.
902 */
903
904 #define SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, len) \
905 if (oldlenp) { \
906 if (!oldp) \
907 *oldlenp = len; \
908 else { \
909 if (*oldlenp < len) \
910 return(ENOMEM); \
911 *oldlenp = len; \
912 error = copyout((caddr_t)valp, oldp, len); \
913 } \
914 }
915
916 #define SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, typ) \
917 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, valp, sizeof(typ))
918
919 #define SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len) \
920 if (newp && newlen != len) \
921 return (EINVAL);
922
923 #define SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, typ) \
924 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, sizeof(typ))
925
926 #define SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, len) \
927 if (error == 0 && newp) \
928 error = copyin(newp, valp, len);
929
930 #define SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, typ) \
931 SYSCTL_SCALAR_NEWPCOP_LEN(newp, valp, sizeof(typ))
932
933 #define SYSCTL_STRING_CORE(oldp, oldlenp, str) \
934 if (oldlenp) { \
935 len = strlen(str) + 1; \
936 if (!oldp) \
937 *oldlenp = len; \
938 else { \
939 if (*oldlenp < len) { \
940 err2 = ENOMEM; \
941 len = *oldlenp; \
942 } else \
943 *oldlenp = len; \
944 error = copyout(str, oldp, len);\
945 if (error == 0) \
946 error = err2; \
947 } \
948 }
949
950 /*
951 * Validate parameters and get old / set new parameters
952 * for an integer-valued sysctl function.
953 */
954 int
955 sysctl_int(void *oldp, size_t *oldlenp, void *newp, size_t newlen, int *valp)
956 {
957 int error = 0;
958
959 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
960 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, int)
961 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, int)
962
963 return (error);
964 }
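
/*
 * Illustrative sketch (not part of this file): the usual shape of a
 * subsystem handler built on sysctl_int().  FOO_THRESHOLD and
 * foo_threshold are hypothetical names used only for this example; the
 * post-update clamping mirrors what the KERN_AUTONICE* cases above do.
 *
 *	int foo_threshold = 10;			(hypothetical tunable)
 *
 *	int
 *	foo_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
 *	    void *newp, size_t newlen, struct proc *p)
 *	{
 *		int error;
 *
 *		if (namelen != 1)
 *			return (ENOTDIR);
 *		switch (name[0]) {
 *		case FOO_THRESHOLD:		(hypothetical MIB value)
 *			error = sysctl_int(oldp, oldlenp, newp, newlen,
 *			    &foo_threshold);
 *			if (!error && foo_threshold < 0)
 *				foo_threshold = 0;
 *			return (error);
 *		default:
 *			return (EOPNOTSUPP);
 *		}
 *	}
 */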
965
966
967 /*
968 * As above, but read-only.
969 */
970 int
971 sysctl_rdint(void *oldp, size_t *oldlenp, void *newp, int val)
972 {
973 int error = 0;
974
975 if (newp)
976 return (EPERM);
977
978 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, int)
979
980 return (error);
981 }
982
983 /*
984 * Validate parameters and get old / set new parameters
985  * for a quad-valued sysctl function.
986 */
987 int
988 sysctl_quad(void *oldp, size_t *oldlenp, void *newp, size_t newlen,
989 quad_t *valp)
990 {
991 int error = 0;
992
993 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, quad_t)
994 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, valp, quad_t)
995 SYSCTL_SCALAR_NEWPCOP_TYP(newp, valp, quad_t)
996
997 return (error);
998 }
999
1000 /*
1001 * As above, but read-only.
1002 */
1003 int
1004 sysctl_rdquad(void *oldp, size_t *oldlenp, void *newp, quad_t val)
1005 {
1006 int error = 0;
1007
1008 if (newp)
1009 return (EPERM);
1010
1011 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &val, quad_t)
1012
1013 return (error);
1014 }
1015
1016 /*
1017 * Validate parameters and get old / set new parameters
1018 * for a string-valued sysctl function.
1019 */
1020 int
1021 sysctl_string(void *oldp, size_t *oldlenp, void *newp, size_t newlen, char *str,
1022 int maxlen)
1023 {
1024 int len, error = 0, err2 = 0;
1025
1026 if (newp && newlen >= maxlen)
1027 return (EINVAL);
1028
1029 SYSCTL_STRING_CORE(oldp, oldlenp, str);
1030
1031 if (error == 0 && newp) {
1032 error = copyin(newp, str, newlen);
1033 str[newlen] = 0;
1034 }
1035 return (error);
1036 }
1037
1038 /*
1039 * As above, but read-only.
1040 */
1041 int
1042 sysctl_rdstring(void *oldp, size_t *oldlenp, void *newp, const char *str)
1043 {
1044 int len, error = 0, err2 = 0;
1045
1046 if (newp)
1047 return (EPERM);
1048
1049 SYSCTL_STRING_CORE(oldp, oldlenp, str);
1050
1051 return (error);
1052 }
1053
1054 /*
1055 * Validate parameters and get old / set new parameters
1056 * for a structure oriented sysctl function.
1057 */
1058 int
1059 sysctl_struct(void *oldp, size_t *oldlenp, void *newp, size_t newlen, void *sp,
1060 int len)
1061 {
1062 int error = 0;
1063
1064 SYSCTL_SCALAR_NEWPCHECK_LEN(newp, newlen, len)
1065 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1066 SYSCTL_SCALAR_NEWPCOP_LEN(newp, sp, len)
1067
1068 return (error);
1069 }
1070
1071 /*
1072 * Validate parameters and get old parameters
1073 * for a structure oriented sysctl function.
1074 */
1075 int
1076 sysctl_rdstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
1077 int len)
1078 {
1079 int error = 0;
1080
1081 if (newp)
1082 return (EPERM);
1083
1084 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1085
1086 return (error);
1087 }
1088
1089 /*
1090 * As above, but can return a truncated result.
1091 */
1092 int
1093 sysctl_rdminstruct(void *oldp, size_t *oldlenp, void *newp, const void *sp,
1094 int len)
1095 {
1096 int error = 0;
1097
1098 if (newp)
1099 return (EPERM);
1100
1101 len = min(*oldlenp, len);
1102 SYSCTL_SCALAR_CORE_LEN(oldp, oldlenp, sp, len)
1103
1104 return (error);
1105 }
1106
1107 /*
1108 * Get file structures.
1109 */
1110 static int
1111 sysctl_file(void *vwhere, size_t *sizep)
1112 {
1113 int buflen, error;
1114 struct file *fp;
1115 char *start, *where;
1116
1117 start = where = vwhere;
1118 buflen = *sizep;
1119 if (where == NULL) {
1120 /*
1121 * overestimate by 10 files
1122 */
1123 *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct file);
1124 return (0);
1125 }
1126
1127 /*
1128 * first copyout filehead
1129 */
1130 if (buflen < sizeof(filehead)) {
1131 *sizep = 0;
1132 return (0);
1133 }
1134 error = copyout((caddr_t)&filehead, where, sizeof(filehead));
1135 if (error)
1136 return (error);
1137 buflen -= sizeof(filehead);
1138 where += sizeof(filehead);
1139
1140 /*
1141 * followed by an array of file structures
1142 */
1143 for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) {
1144 if (buflen < sizeof(struct file)) {
1145 *sizep = where - start;
1146 return (ENOMEM);
1147 }
1148 error = copyout((caddr_t)fp, where, sizeof(struct file));
1149 if (error)
1150 return (error);
1151 buflen -= sizeof(struct file);
1152 where += sizeof(struct file);
1153 }
1154 *sizep = where - start;
1155 return (0);
1156 }
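
/*
 * Illustrative userland sketch (not part of this file): fetching the
 * raw KERN_FILE buffer.  The data is the kernel's `filehead' list head
 * followed by an array of struct file, exactly as copied out above;
 * decoding it needs the kernel structure layouts, so only the size
 * probe and the raw transfer are shown.  The probe already includes the
 * 10-file slop, but the table can still grow, so ENOMEM is possible.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_FILE };
 *		size_t len;
 *		char *buf;
 *
 *		if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.file (size)");
 *		if ((buf = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.file");
 *		printf("%lu bytes of file table returned\n",
 *		    (unsigned long)len);
 *		free(buf);
 *		return 0;
 *	}
 */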
1157
1158 #if defined(SYSVMSG) || defined(SYSVSEM) || defined(SYSVSHM)
1159 #define FILL_PERM(src, dst) do { \
1160 (dst)._key = (src)._key; \
1161 (dst).uid = (src).uid; \
1162 (dst).gid = (src).gid; \
1163 (dst).cuid = (src).cuid; \
1164 (dst).cgid = (src).cgid; \
1165 (dst).mode = (src).mode; \
1166 (dst)._seq = (src)._seq; \
1167 } while (0);
1168 #define FILL_MSG(src, dst) do { \
1169 FILL_PERM((src).msg_perm, (dst).msg_perm); \
1170 (dst).msg_qnum = (src).msg_qnum; \
1171 (dst).msg_qbytes = (src).msg_qbytes; \
1172 (dst)._msg_cbytes = (src)._msg_cbytes; \
1173 (dst).msg_lspid = (src).msg_lspid; \
1174 (dst).msg_lrpid = (src).msg_lrpid; \
1175 (dst).msg_stime = (src).msg_stime; \
1176 (dst).msg_rtime = (src).msg_rtime; \
1177 (dst).msg_ctime = (src).msg_ctime; \
1178 } while (0)
1179 #define FILL_SEM(src, dst) do { \
1180 FILL_PERM((src).sem_perm, (dst).sem_perm); \
1181 (dst).sem_nsems = (src).sem_nsems; \
1182 (dst).sem_otime = (src).sem_otime; \
1183 (dst).sem_ctime = (src).sem_ctime; \
1184 } while (0)
1185 #define FILL_SHM(src, dst) do { \
1186 FILL_PERM((src).shm_perm, (dst).shm_perm); \
1187 (dst).shm_segsz = (src).shm_segsz; \
1188 (dst).shm_lpid = (src).shm_lpid; \
1189 (dst).shm_cpid = (src).shm_cpid; \
1190 (dst).shm_atime = (src).shm_atime; \
1191 (dst).shm_dtime = (src).shm_dtime; \
1192 (dst).shm_ctime = (src).shm_ctime; \
1193 (dst).shm_nattch = (src).shm_nattch; \
1194 } while (0)
1195
1196 static int
1197 sysctl_sysvipc(int *name, u_int namelen, void *where, size_t *sizep)
1198 {
1199 #ifdef SYSVMSG
1200 struct msg_sysctl_info *msgsi;
1201 #endif
1202 #ifdef SYSVSEM
1203 struct sem_sysctl_info *semsi;
1204 #endif
1205 #ifdef SYSVSHM
1206 struct shm_sysctl_info *shmsi;
1207 #endif
1208 size_t infosize, dssize, tsize, buflen;
1209 void *buf = NULL;
1210 char *start;
1211 int32_t nds;
1212 int i, error, ret;
1213
1214 if (namelen != 1)
1215 return (EINVAL);
1216
1217 start = where;
1218 buflen = *sizep;
1219
1220 switch (*name) {
1221 case KERN_SYSVIPC_MSG_INFO:
1222 #ifdef SYSVMSG
1223 infosize = sizeof(msgsi->msginfo);
1224 nds = msginfo.msgmni;
1225 dssize = sizeof(msgsi->msgids[0]);
1226 break;
1227 #else
1228 return (EINVAL);
1229 #endif
1230 case KERN_SYSVIPC_SEM_INFO:
1231 #ifdef SYSVSEM
1232 infosize = sizeof(semsi->seminfo);
1233 nds = seminfo.semmni;
1234 dssize = sizeof(semsi->semids[0]);
1235 break;
1236 #else
1237 return (EINVAL);
1238 #endif
1239 case KERN_SYSVIPC_SHM_INFO:
1240 #ifdef SYSVSHM
1241 infosize = sizeof(shmsi->shminfo);
1242 nds = shminfo.shmmni;
1243 dssize = sizeof(shmsi->shmids[0]);
1244 break;
1245 #else
1246 return (EINVAL);
1247 #endif
1248 default:
1249 return (EINVAL);
1250 }
1251 /*
1252 * Round infosize to 64 bit boundary if requesting more than just
1253 * the info structure or getting the total data size.
1254 */
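	/*
	 * (For instance, a 44-byte info structure rounds up to 48:
	 * ((44 + 7) / 8) * 8 == 48, so the array of per-object records
	 * that follows starts on an 8-byte boundary.)
	 */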
1255 if (where == NULL || *sizep > infosize)
1256 infosize = ((infosize + 7) / 8) * 8;
1257 tsize = infosize + nds * dssize;
1258
1259 /* Return just the total size required. */
1260 if (where == NULL) {
1261 *sizep = tsize;
1262 return (0);
1263 }
1264
1265 /* Not enough room for even the info struct. */
1266 if (buflen < infosize) {
1267 *sizep = 0;
1268 return (ENOMEM);
1269 }
1270 buf = malloc(min(tsize, buflen), M_TEMP, M_WAITOK);
1271 memset(buf, 0, min(tsize, buflen));
1272
1273 switch (*name) {
1274 #ifdef SYSVMSG
1275 case KERN_SYSVIPC_MSG_INFO:
1276 msgsi = (struct msg_sysctl_info *)buf;
1277 msgsi->msginfo = msginfo;
1278 break;
1279 #endif
1280 #ifdef SYSVSEM
1281 case KERN_SYSVIPC_SEM_INFO:
1282 semsi = (struct sem_sysctl_info *)buf;
1283 semsi->seminfo = seminfo;
1284 break;
1285 #endif
1286 #ifdef SYSVSHM
1287 case KERN_SYSVIPC_SHM_INFO:
1288 shmsi = (struct shm_sysctl_info *)buf;
1289 shmsi->shminfo = shminfo;
1290 break;
1291 #endif
1292 }
1293 buflen -= infosize;
1294
1295 ret = 0;
1296 if (buflen > 0) {
1297 /* Fill in the IPC data structures. */
1298 for (i = 0; i < nds; i++) {
1299 if (buflen < dssize) {
1300 ret = ENOMEM;
1301 break;
1302 }
1303 switch (*name) {
1304 #ifdef SYSVMSG
1305 case KERN_SYSVIPC_MSG_INFO:
1306 FILL_MSG(msqids[i], msgsi->msgids[i]);
1307 break;
1308 #endif
1309 #ifdef SYSVSEM
1310 case KERN_SYSVIPC_SEM_INFO:
1311 FILL_SEM(sema[i], semsi->semids[i]);
1312 break;
1313 #endif
1314 #ifdef SYSVSHM
1315 case KERN_SYSVIPC_SHM_INFO:
1316 FILL_SHM(shmsegs[i], shmsi->shmids[i]);
1317 break;
1318 #endif
1319 }
1320 buflen -= dssize;
1321 }
1322 }
1323 *sizep -= buflen;
1324 error = copyout(buf, start, *sizep);
1325 /* If copyout succeeded, use return code set earlier. */
1326 if (error == 0)
1327 error = ret;
1328 if (buf)
1329 free(buf, M_TEMP);
1330 return (error);
1331 }
1332 #endif /* SYSVMSG || SYSVSEM || SYSVSHM */
1333
1334 static int
1335 sysctl_msgbuf(void *vwhere, size_t *sizep)
1336 {
1337 char *where = vwhere;
1338 size_t len, maxlen = *sizep;
1339 long beg, end;
1340 int error;
1341
1342 /*
1343 * deal with cases where the message buffer has
1344 * become corrupted.
1345 */
1346 if (!msgbufenabled || msgbufp->msg_magic != MSG_MAGIC) {
1347 msgbufenabled = 0;
1348 return (ENXIO);
1349 }
1350
1351 if (where == NULL) {
1352 /* always return full buffer size */
1353 *sizep = msgbufp->msg_bufs;
1354 return (0);
1355 }
1356
1357 error = 0;
1358 maxlen = min(msgbufp->msg_bufs, maxlen);
1359
1360 /*
1361 * First, copy from the write pointer to the end of
1362 * message buffer.
1363 */
1364 beg = msgbufp->msg_bufx;
1365 end = msgbufp->msg_bufs;
1366 while (maxlen > 0) {
1367 len = min(end - beg, maxlen);
1368 if (len == 0)
1369 break;
1370 error = copyout(&msgbufp->msg_bufc[beg], where, len);
1371 if (error)
1372 break;
1373 where += len;
1374 maxlen -= len;
1375
1376 /*
1377 * ... then, copy from the beginning of message buffer to
1378 * the write pointer.
1379 */
1380 beg = 0;
1381 end = msgbufp->msg_bufx;
1382 }
1383 return (error);
1384 }
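
/*
 * Illustrative userland sketch (not part of this file): dumping the
 * kernel message buffer via KERN_MSGBUF.  A NULL oldp reports the full
 * buffer size; the data then comes back oldest-first, because the copy
 * above starts at the current write pointer and wraps around.  The raw
 * ring may contain NUL padding that a real tool would filter out.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_KERN, KERN_MSGBUF };
 *		size_t len;
 *		char *buf;
 *
 *		if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.msgbuf (size)");
 *		if ((buf = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.msgbuf");
 *		(void)write(STDOUT_FILENO, buf, len);
 *		free(buf);
 *		return 0;
 *	}
 */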
1385
1386 /*
1387 * try over estimating by 5 procs
1388 */
1389 #define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc))
1390
1391 static int
1392 sysctl_doeproc(int *name, u_int namelen, void *vwhere, size_t *sizep)
1393 {
1394 struct eproc eproc;
1395 struct kinfo_proc2 kproc2;
1396 struct kinfo_proc *dp;
1397 struct proc *p;
1398 const struct proclist_desc *pd;
1399 char *where, *dp2;
1400 int type, op, arg, elem_size, elem_count;
1401 int buflen, needed, error;
1402
1403 dp = vwhere;
1404 dp2 = where = vwhere;
1405 buflen = where != NULL ? *sizep : 0;
1406 error = needed = 0;
1407 type = name[0];
1408
1409 if (type == KERN_PROC) {
1410 if (namelen != 3 && !(namelen == 2 && name[1] == KERN_PROC_ALL))
1411 return (EINVAL);
1412 op = name[1];
1413 if (op != KERN_PROC_ALL)
1414 arg = name[2];
1415 } else {
1416 if (namelen != 5)
1417 return (EINVAL);
1418 op = name[1];
1419 arg = name[2];
1420 elem_size = name[3];
1421 elem_count = name[4];
1422 }
1423
1424 proclist_lock_read();
1425
1426 pd = proclists;
1427 again:
1428 for (p = LIST_FIRST(pd->pd_list); p != NULL; p = LIST_NEXT(p, p_list)) {
1429 /*
1430 * Skip embryonic processes.
1431 */
1432 if (p->p_stat == SIDL)
1433 continue;
1434 /*
1435 * TODO - make more efficient (see notes below).
1436 * do by session.
1437 */
1438 switch (op) {
1439
1440 case KERN_PROC_PID:
1441 /* could do this with just a lookup */
1442 if (p->p_pid != (pid_t)arg)
1443 continue;
1444 break;
1445
1446 case KERN_PROC_PGRP:
1447 /* could do this by traversing pgrp */
1448 if (p->p_pgrp->pg_id != (pid_t)arg)
1449 continue;
1450 break;
1451
1452 case KERN_PROC_SESSION:
1453 if (p->p_session->s_sid != (pid_t)arg)
1454 continue;
1455 break;
1456
1457 case KERN_PROC_TTY:
1458 if (arg == KERN_PROC_TTY_REVOKE) {
1459 if ((p->p_flag & P_CONTROLT) == 0 ||
1460 p->p_session->s_ttyp == NULL ||
1461 p->p_session->s_ttyvp != NULL)
1462 continue;
1463 } else if ((p->p_flag & P_CONTROLT) == 0 ||
1464 p->p_session->s_ttyp == NULL) {
1465 if ((dev_t)arg != KERN_PROC_TTY_NODEV)
1466 continue;
1467 } else if (p->p_session->s_ttyp->t_dev != (dev_t)arg)
1468 continue;
1469 break;
1470
1471 case KERN_PROC_UID:
1472 if (p->p_ucred->cr_uid != (uid_t)arg)
1473 continue;
1474 break;
1475
1476 case KERN_PROC_RUID:
1477 if (p->p_cred->p_ruid != (uid_t)arg)
1478 continue;
1479 break;
1480
1481 case KERN_PROC_GID:
1482 if (p->p_ucred->cr_gid != (uid_t)arg)
1483 continue;
1484 break;
1485
1486 case KERN_PROC_RGID:
1487 if (p->p_cred->p_rgid != (uid_t)arg)
1488 continue;
1489 break;
1490
1491 case KERN_PROC_ALL:
1492 /* allow everything */
1493 break;
1494
1495 default:
1496 error = EINVAL;
1497 goto cleanup;
1498 }
1499 if (type == KERN_PROC) {
1500 if (buflen >= sizeof(struct kinfo_proc)) {
1501 fill_eproc(p, &eproc);
1502 error = copyout((caddr_t)p, &dp->kp_proc,
1503 sizeof(struct proc));
1504 if (error)
1505 goto cleanup;
1506 error = copyout((caddr_t)&eproc, &dp->kp_eproc,
1507 sizeof(eproc));
1508 if (error)
1509 goto cleanup;
1510 dp++;
1511 buflen -= sizeof(struct kinfo_proc);
1512 }
1513 needed += sizeof(struct kinfo_proc);
1514 } else { /* KERN_PROC2 */
1515 if (buflen >= elem_size && elem_count > 0) {
1516 fill_kproc2(p, &kproc2);
1517 /*
1518 * Copy out elem_size, but not larger than
1519 * the size of a struct kinfo_proc2.
1520 */
1521 error = copyout(&kproc2, dp2,
1522 min(sizeof(kproc2), elem_size));
1523 if (error)
1524 goto cleanup;
1525 dp2 += elem_size;
1526 buflen -= elem_size;
1527 elem_count--;
1528 }
1529 needed += elem_size;
1530 }
1531 }
1532 pd++;
1533 if (pd->pd_list != NULL)
1534 goto again;
1535 proclist_unlock_read();
1536
1537 if (where != NULL) {
1538 if (type == KERN_PROC)
1539 *sizep = (caddr_t)dp - where;
1540 else
1541 *sizep = dp2 - where;
1542 if (needed > *sizep)
1543 return (ENOMEM);
1544 } else {
1545 needed += KERN_PROCSLOP;
1546 *sizep = needed;
1547 }
1548 return (0);
1549 cleanup:
1550 proclist_unlock_read();
1551 return (error);
1552 }
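
/*
 * Illustrative userland sketch (not part of this file): listing all
 * processes through KERN_PROC2.  The six-integer name carries the
 * filter op, its argument, the per-record size and the record count; a
 * NULL oldp with the record size filled in yields the space required
 * (including the KERN_PROCSLOP overestimate added above).
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[6] = { CTL_KERN, KERN_PROC2, KERN_PROC_ALL, 0,
 *		    (int)sizeof(struct kinfo_proc2), 0 };
 *		struct kinfo_proc2 *kp;
 *		size_t len;
 *		int i, n;
 *
 *		if (sysctl(mib, 6, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.proc2 (size)");
 *		if ((kp = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		mib[5] = (int)(len / sizeof(struct kinfo_proc2));
 *		if (sysctl(mib, 6, kp, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.proc2");
 *		n = (int)(len / sizeof(struct kinfo_proc2));
 *		for (i = 0; i < n; i++)
 *			printf("%d\t%s\n", (int)kp[i].p_pid, kp[i].p_comm);
 *		free(kp);
 *		return 0;
 *	}
 */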
1553
1554
1555 /*
1556 * try over estimating by 5 LWPs
1557 */
1558 #define KERN_LWPSLOP (5 * sizeof(struct kinfo_lwp))
1559
1560 static int
1561 sysctl_dolwp(int *name, u_int namelen, void *vwhere, size_t *sizep)
1562 {
1563 struct kinfo_lwp klwp;
1564 struct proc *p;
1565 struct lwp *l;
1566 char *where, *dp;
1567 int type, pid, elem_size, elem_count;
1568 int buflen, needed, error;
1569
1570 dp = where = vwhere;
1571 buflen = where != NULL ? *sizep : 0;
1572 error = needed = 0;
1573 type = name[0];
1574
1575 if (namelen != 4)
1576 return (EINVAL);
1577 pid = name[1];
1578 elem_size = name[2];
1579 elem_count = name[3];
1580
1581 p = pfind(pid);
1582 if (p == NULL)
1583 return (ESRCH);
1584 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1585 if (buflen >= elem_size && elem_count > 0) {
1586 fill_lwp(l, &klwp);
1587 /*
1588 * Copy out elem_size, but not larger than
1589 			 * the size of a struct kinfo_lwp.
1590 */
1591 error = copyout(&klwp, dp,
1592 min(sizeof(klwp), elem_size));
1593 if (error)
1594 goto cleanup;
1595 dp += elem_size;
1596 buflen -= elem_size;
1597 elem_count--;
1598 }
1599 needed += elem_size;
1600 }
1601
1602 if (where != NULL) {
1603 *sizep = dp - where;
1604 if (needed > *sizep)
1605 return (ENOMEM);
1606 } else {
1607 		needed += KERN_LWPSLOP;
1608 *sizep = needed;
1609 }
1610 return (0);
1611 cleanup:
1612 return (error);
1613 }
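
/*
 * Illustrative userland sketch (not part of this file): fetching the
 * LWPs of one process through KERN_LWP.  The name carries the pid, the
 * per-record size and the record count, mirroring the checks in
 * sysctl_dolwp() above.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[5] = { CTL_KERN, KERN_LWP, 0,
 *		    (int)sizeof(struct kinfo_lwp), 0 };
 *		struct kinfo_lwp *kl;
 *		size_t len;
 *		int i, n;
 *
 *		mib[2] = getpid();
 *		if (sysctl(mib, 5, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.lwp (size)");
 *		if ((kl = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		mib[4] = (int)(len / sizeof(struct kinfo_lwp));
 *		if (sysctl(mib, 5, kl, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.lwp");
 *		n = (int)(len / sizeof(struct kinfo_lwp));
 *		for (i = 0; i < n; i++)
 *			printf("lwp %d stat %d\n", (int)kl[i].l_lid,
 *			    (int)kl[i].l_stat);
 *		free(kl);
 *		return 0;
 *	}
 */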
1614
1615 /*
1616 * Fill in an eproc structure for the specified process.
1617 */
1618 void
1619 fill_eproc(struct proc *p, struct eproc *ep)
1620 {
1621 struct tty *tp;
1622 struct lwp *l;
1623
1624 ep->e_paddr = p;
1625 ep->e_sess = p->p_session;
1626 ep->e_pcred = *p->p_cred;
1627 ep->e_ucred = *p->p_ucred;
1628 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1629 ep->e_vm.vm_rssize = 0;
1630 ep->e_vm.vm_tsize = 0;
1631 ep->e_vm.vm_dsize = 0;
1632 ep->e_vm.vm_ssize = 0;
1633 /* ep->e_vm.vm_pmap = XXX; */
1634 } else {
1635 struct vmspace *vm = p->p_vmspace;
1636
1637 ep->e_vm.vm_rssize = vm_resident_count(vm);
1638 ep->e_vm.vm_tsize = vm->vm_tsize;
1639 ep->e_vm.vm_dsize = vm->vm_dsize;
1640 ep->e_vm.vm_ssize = vm->vm_ssize;
1641
1642 /* Pick a "representative" LWP */
1643 l = proc_representative_lwp(p);
1644
1645 if (l->l_wmesg)
1646 strncpy(ep->e_wmesg, l->l_wmesg, WMESGLEN);
1647 }
1648 if (p->p_pptr)
1649 ep->e_ppid = p->p_pptr->p_pid;
1650 else
1651 ep->e_ppid = 0;
1652 ep->e_pgid = p->p_pgrp->pg_id;
1653 ep->e_sid = ep->e_sess->s_sid;
1654 ep->e_jobc = p->p_pgrp->pg_jobc;
1655 if ((p->p_flag & P_CONTROLT) &&
1656 (tp = ep->e_sess->s_ttyp)) {
1657 ep->e_tdev = tp->t_dev;
1658 ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1659 ep->e_tsess = tp->t_session;
1660 } else
1661 ep->e_tdev = NODEV;
1662
1663 ep->e_xsize = ep->e_xrssize = 0;
1664 ep->e_xccount = ep->e_xswrss = 0;
1665 ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0;
1666 if (SESS_LEADER(p))
1667 ep->e_flag |= EPROC_SLEADER;
1668 strncpy(ep->e_login, ep->e_sess->s_login, MAXLOGNAME);
1669 }
1670
1671 /*
1672  * Fill in a kinfo_proc2 structure for the specified process.
1673 */
1674 static void
1675 fill_kproc2(struct proc *p, struct kinfo_proc2 *ki)
1676 {
1677 struct tty *tp;
1678 struct lwp *l;
1679 memset(ki, 0, sizeof(*ki));
1680
1681 ki->p_paddr = PTRTOINT64(p);
1682 ki->p_fd = PTRTOINT64(p->p_fd);
1683 ki->p_cwdi = PTRTOINT64(p->p_cwdi);
1684 ki->p_stats = PTRTOINT64(p->p_stats);
1685 ki->p_limit = PTRTOINT64(p->p_limit);
1686 ki->p_vmspace = PTRTOINT64(p->p_vmspace);
1687 ki->p_sigacts = PTRTOINT64(p->p_sigacts);
1688 ki->p_sess = PTRTOINT64(p->p_session);
1689 ki->p_tsess = 0; /* may be changed if controlling tty below */
1690 ki->p_ru = PTRTOINT64(p->p_ru);
1691
1692 ki->p_eflag = 0;
1693 ki->p_exitsig = p->p_exitsig;
1694 ki->p_flag = p->p_flag;
1695
1696 ki->p_pid = p->p_pid;
1697 if (p->p_pptr)
1698 ki->p_ppid = p->p_pptr->p_pid;
1699 else
1700 ki->p_ppid = 0;
1701 ki->p_sid = p->p_session->s_sid;
1702 ki->p__pgid = p->p_pgrp->pg_id;
1703
1704 ki->p_tpgid = NO_PID; /* may be changed if controlling tty below */
1705
1706 ki->p_uid = p->p_ucred->cr_uid;
1707 ki->p_ruid = p->p_cred->p_ruid;
1708 ki->p_gid = p->p_ucred->cr_gid;
1709 ki->p_rgid = p->p_cred->p_rgid;
1710
1711 memcpy(ki->p_groups, p->p_cred->pc_ucred->cr_groups,
1712 min(sizeof(ki->p_groups), sizeof(p->p_cred->pc_ucred->cr_groups)));
1713 ki->p_ngroups = p->p_cred->pc_ucred->cr_ngroups;
1714
1715 ki->p_jobc = p->p_pgrp->pg_jobc;
1716 if ((p->p_flag & P_CONTROLT) && (tp = p->p_session->s_ttyp)) {
1717 ki->p_tdev = tp->t_dev;
1718 ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
1719 ki->p_tsess = PTRTOINT64(tp->t_session);
1720 } else {
1721 ki->p_tdev = NODEV;
1722 }
1723
1724 ki->p_estcpu = p->p_estcpu;
1725 ki->p_rtime_sec = p->p_rtime.tv_sec;
1726 ki->p_rtime_usec = p->p_rtime.tv_usec;
1727 ki->p_cpticks = p->p_cpticks;
1728 ki->p_pctcpu = p->p_pctcpu;
1729
1730 ki->p_uticks = p->p_uticks;
1731 ki->p_sticks = p->p_sticks;
1732 ki->p_iticks = p->p_iticks;
1733
1734 ki->p_tracep = PTRTOINT64(p->p_tracep);
1735 ki->p_traceflag = p->p_traceflag;
1736
1737
1738 memcpy(&ki->p_siglist, &p->p_sigctx.ps_siglist, sizeof(ki_sigset_t));
1739 memcpy(&ki->p_sigmask, &p->p_sigctx.ps_sigmask, sizeof(ki_sigset_t));
1740 memcpy(&ki->p_sigignore, &p->p_sigctx.ps_sigignore,sizeof(ki_sigset_t));
1741 memcpy(&ki->p_sigcatch, &p->p_sigctx.ps_sigcatch, sizeof(ki_sigset_t));
1742
1743 ki->p_stat = p->p_stat; /* Will likely be overridden by LWP status */
1744 ki->p_realstat = p->p_stat;
1745 ki->p_nice = p->p_nice;
1746
1747 ki->p_xstat = p->p_xstat;
1748 ki->p_acflag = p->p_acflag;
1749
1750 strncpy(ki->p_comm, p->p_comm,
1751 min(sizeof(ki->p_comm), sizeof(p->p_comm)));
1752
1753 strncpy(ki->p_login, p->p_session->s_login, sizeof(ki->p_login));
1754
1755 ki->p_nlwps = p->p_nlwps;
1756 ki->p_nrlwps = p->p_nrlwps;
1757 ki->p_realflag = p->p_flag;
1758
1759 if (p->p_stat == SIDL || P_ZOMBIE(p)) {
1760 ki->p_vm_rssize = 0;
1761 ki->p_vm_tsize = 0;
1762 ki->p_vm_dsize = 0;
1763 ki->p_vm_ssize = 0;
1764 l = NULL;
1765 } else {
1766 struct vmspace *vm = p->p_vmspace;
1767
1768 ki->p_vm_rssize = vm_resident_count(vm);
1769 ki->p_vm_tsize = vm->vm_tsize;
1770 ki->p_vm_dsize = vm->vm_dsize;
1771 ki->p_vm_ssize = vm->vm_ssize;
1772
1773 /* Pick a "representative" LWP */
1774 l = proc_representative_lwp(p);
1775 ki->p_forw = PTRTOINT64(l->l_forw);
1776 ki->p_back = PTRTOINT64(l->l_back);
1777 ki->p_addr = PTRTOINT64(l->l_addr);
1778 ki->p_stat = l->l_stat;
1779 ki->p_flag |= l->l_flag;
1780 ki->p_swtime = l->l_swtime;
1781 ki->p_slptime = l->l_slptime;
1782 if (l->l_stat == LSONPROC) {
1783 KDASSERT(l->l_cpu != NULL);
1784 ki->p_schedflags = l->l_cpu->ci_schedstate.spc_flags;
1785 } else
1786 ki->p_schedflags = 0;
1787 ki->p_holdcnt = l->l_holdcnt;
1788 ki->p_priority = l->l_priority;
1789 ki->p_usrpri = l->l_usrpri;
1790 if (l->l_wmesg)
1791 strncpy(ki->p_wmesg, l->l_wmesg, sizeof(ki->p_wmesg));
1792 ki->p_wchan = PTRTOINT64(l->l_wchan);
1793
1794 }
1795
1796 if (p->p_session->s_ttyvp)
1797 ki->p_eflag |= EPROC_CTTY;
1798 if (SESS_LEADER(p))
1799 ki->p_eflag |= EPROC_SLEADER;
1800
1801 /* XXX Is this double check necessary? */
1802 if (P_ZOMBIE(p)) {
1803 ki->p_uvalid = 0;
1804 } else {
1805 ki->p_uvalid = 1;
1806
1807 ki->p_ustart_sec = p->p_stats->p_start.tv_sec;
1808 ki->p_ustart_usec = p->p_stats->p_start.tv_usec;
1809
1810 ki->p_uutime_sec = p->p_stats->p_ru.ru_utime.tv_sec;
1811 ki->p_uutime_usec = p->p_stats->p_ru.ru_utime.tv_usec;
1812 ki->p_ustime_sec = p->p_stats->p_ru.ru_stime.tv_sec;
1813 ki->p_ustime_usec = p->p_stats->p_ru.ru_stime.tv_usec;
1814
1815 ki->p_uru_maxrss = p->p_stats->p_ru.ru_maxrss;
1816 ki->p_uru_ixrss = p->p_stats->p_ru.ru_ixrss;
1817 ki->p_uru_idrss = p->p_stats->p_ru.ru_idrss;
1818 ki->p_uru_isrss = p->p_stats->p_ru.ru_isrss;
1819 ki->p_uru_minflt = p->p_stats->p_ru.ru_minflt;
1820 ki->p_uru_majflt = p->p_stats->p_ru.ru_majflt;
1821 ki->p_uru_nswap = p->p_stats->p_ru.ru_nswap;
1822 ki->p_uru_inblock = p->p_stats->p_ru.ru_inblock;
1823 ki->p_uru_oublock = p->p_stats->p_ru.ru_oublock;
1824 ki->p_uru_msgsnd = p->p_stats->p_ru.ru_msgsnd;
1825 ki->p_uru_msgrcv = p->p_stats->p_ru.ru_msgrcv;
1826 ki->p_uru_nsignals = p->p_stats->p_ru.ru_nsignals;
1827 ki->p_uru_nvcsw = p->p_stats->p_ru.ru_nvcsw;
1828 ki->p_uru_nivcsw = p->p_stats->p_ru.ru_nivcsw;
1829
1830 ki->p_uctime_sec = p->p_stats->p_cru.ru_utime.tv_sec +
1831 p->p_stats->p_cru.ru_stime.tv_sec;
1832 ki->p_uctime_usec = p->p_stats->p_cru.ru_utime.tv_usec +
1833 p->p_stats->p_cru.ru_stime.tv_usec;
1834 }
1835 #ifdef MULTIPROCESSOR
1836 if (l && l->l_cpu != NULL)
1837 ki->p_cpuid = l->l_cpu->ci_cpuid;
1838 else
1839 #endif
1840 ki->p_cpuid = KI_NOCPU;
1841
1842 }
1843
1844 /*
1845 * Fill in a kinfo_lwp structure for the specified lwp.
1846 */
1847 static void
1848 fill_lwp(struct lwp *l, struct kinfo_lwp *kl)
1849 {
1850 kl->l_forw = PTRTOINT64(l->l_forw);
1851 kl->l_back = PTRTOINT64(l->l_back);
1852 kl->l_laddr = PTRTOINT64(l);
1853 kl->l_addr = PTRTOINT64(l->l_addr);
1854 kl->l_stat = l->l_stat;
1855 kl->l_lid = l->l_lid;
1856 kl->l_flag = l->l_flag;
1857
1858 kl->l_swtime = l->l_swtime;
1859 kl->l_slptime = l->l_slptime;
1860 if (l->l_stat == LSONPROC) {
1861 KDASSERT(l->l_cpu != NULL);
1862 kl->l_schedflags = l->l_cpu->ci_schedstate.spc_flags;
1863 } else
1864 kl->l_schedflags = 0;
1865 kl->l_holdcnt = l->l_holdcnt;
1866 kl->l_priority = l->l_priority;
1867 kl->l_usrpri = l->l_usrpri;
1868 if (l->l_wmesg)
1869 strncpy(kl->l_wmesg, l->l_wmesg, sizeof(kl->l_wmesg));
1870 kl->l_wchan = PTRTOINT64(l->l_wchan);
1871 #ifdef MULTIPROCESSOR
1872 if (l->l_cpu != NULL)
1873 kl->l_cpuid = l->l_cpu->ci_cpuid;
1874 else
1875 #endif
1876 kl->l_cpuid = KI_NOCPU;
1877 }
1878
1879 int
1880 sysctl_procargs(int *name, u_int namelen, void *where, size_t *sizep,
1881 struct proc *up)
1882 {
1883 struct ps_strings pss;
1884 struct proc *p;
1885 size_t len, upper_bound, xlen;
1886 struct uio auio;
1887 struct iovec aiov;
1888 vaddr_t argv;
1889 pid_t pid;
1890 int nargv, type, error, i;
1891 char *arg;
1892 char *tmp;
1893
1894 if (namelen != 2)
1895 return (EINVAL);
1896 pid = name[0];
1897 type = name[1];
1898
1899 switch (type) {
1900 case KERN_PROC_ARGV:
1901 case KERN_PROC_NARGV:
1902 case KERN_PROC_ENV:
1903 case KERN_PROC_NENV:
1904 /* ok */
1905 break;
1906 default:
1907 return (EINVAL);
1908 }
1909
1910 /* check pid */
1911 if ((p = pfind(pid)) == NULL)
1912 return (EINVAL);
1913
1914 	/* only root or the same user can look at the environment */
1915 if (type == KERN_PROC_ENV || type == KERN_PROC_NENV) {
1916 if (up->p_ucred->cr_uid != 0) {
1917 if (up->p_cred->p_ruid != p->p_cred->p_ruid ||
1918 up->p_cred->p_ruid != p->p_cred->p_svuid)
1919 return (EPERM);
1920 }
1921 }
1922
1923 if (sizep != NULL && where == NULL) {
1924 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV)
1925 *sizep = sizeof (int);
1926 else
1927 *sizep = ARG_MAX; /* XXX XXX XXX */
1928 return (0);
1929 }
1930 if (where == NULL || sizep == NULL)
1931 return (EINVAL);
1932
1933 /*
1934 * Zombies don't have a stack, so we can't read their psstrings.
1935 * System processes also don't have a user stack.
1936 */
1937 if (P_ZOMBIE(p) || (p->p_flag & P_SYSTEM) != 0)
1938 return (EINVAL);
1939
1940 /*
1941 * Lock the process down in memory.
1942 */
1943 /* XXXCDC: how should locking work here? */
1944 if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
1945 return (EFAULT);
1946
1947 p->p_vmspace->vm_refcnt++; /* XXX */
1948
1949 /*
1950 * Allocate a temporary buffer to hold the arguments.
1951 */
1952 arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
1953
1954 /*
1955 * Read in the ps_strings structure.
1956 */
1957 aiov.iov_base = &pss;
1958 aiov.iov_len = sizeof(pss);
1959 auio.uio_iov = &aiov;
1960 auio.uio_iovcnt = 1;
1961 auio.uio_offset = (vaddr_t)p->p_psstr;
1962 auio.uio_resid = sizeof(pss);
1963 auio.uio_segflg = UIO_SYSSPACE;
1964 auio.uio_rw = UIO_READ;
1965 auio.uio_procp = NULL;
1966 error = uvm_io(&p->p_vmspace->vm_map, &auio);
1967 if (error)
1968 goto done;
1969
1970 if (type == KERN_PROC_ARGV || type == KERN_PROC_NARGV)
1971 memcpy(&nargv, (char *)&pss + p->p_psnargv, sizeof(nargv));
1972 else
1973 memcpy(&nargv, (char *)&pss + p->p_psnenv, sizeof(nargv));
1974 if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV) {
1975 error = copyout(&nargv, where, sizeof(nargv));
1976 *sizep = sizeof(nargv);
1977 goto done;
1978 }
1979 /*
1980 * Now read the address of the argument vector.
1981 */
1982 switch (type) {
1983 case KERN_PROC_ARGV:
1984 /* XXX compat32 stuff here */
1985 memcpy(&tmp, (char *)&pss + p->p_psargv, sizeof(tmp));
1986 break;
1987 case KERN_PROC_ENV:
1988 memcpy(&tmp, (char *)&pss + p->p_psenv, sizeof(tmp));
1989 break;
1990 default:
1991 return (EINVAL);
1992 }
1993 auio.uio_offset = (off_t)(long)tmp;
1994 aiov.iov_base = &argv;
1995 aiov.iov_len = sizeof(argv);
1996 auio.uio_iov = &aiov;
1997 auio.uio_iovcnt = 1;
1998 auio.uio_resid = sizeof(argv);
1999 auio.uio_segflg = UIO_SYSSPACE;
2000 auio.uio_rw = UIO_READ;
2001 auio.uio_procp = NULL;
2002 error = uvm_io(&p->p_vmspace->vm_map, &auio);
2003 if (error)
2004 goto done;
2005
2006 /*
2007 * Now copy in the actual argument vector, one page at a time,
2008 * since we don't know how long the vector is (though, we do
2009 * know how many NUL-terminated strings are in the vector).
2010 */
2011 len = 0;
2012 upper_bound = *sizep;
2013 for (; nargv != 0 && len < upper_bound; len += xlen) {
2014 aiov.iov_base = arg;
2015 aiov.iov_len = PAGE_SIZE;
2016 auio.uio_iov = &aiov;
2017 auio.uio_iovcnt = 1;
2018 auio.uio_offset = argv + len;
2019 xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
2020 auio.uio_resid = xlen;
2021 auio.uio_segflg = UIO_SYSSPACE;
2022 auio.uio_rw = UIO_READ;
2023 auio.uio_procp = NULL;
2024 error = uvm_io(&p->p_vmspace->vm_map, &auio);
2025 if (error)
2026 goto done;
2027
2028 for (i = 0; i < xlen && nargv != 0; i++) {
2029 if (arg[i] == '\0')
2030 nargv--; /* one full string */
2031 }
2032
2033 /* make sure we don't copyout past the end of the user's buffer */
2034 if (len + i > upper_bound)
2035 i = upper_bound - len;
2036
2037 error = copyout(arg, (char *)where + len, i);
2038 if (error)
2039 break;
2040
2041 if (nargv == 0) {
2042 len += i;
2043 break;
2044 }
2045 }
2046 *sizep = len;
2047
2048 done:
2049 uvmspace_free(p->p_vmspace);
2050
2051 free(arg, M_TEMP);
2052 return (error);
2053 }
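
/*
 * Illustrative userland sketch (not part of this file): retrieving a
 * process's own argument vector with KERN_PROC_ARGS.  The size probe
 * simply reports ARG_MAX, so the caller allocates that much and then
 * walks the NUL-separated strings that come back; *sizep is trimmed to
 * the bytes actually copied.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[4] = { CTL_KERN, KERN_PROC_ARGS, 0, KERN_PROC_ARGV };
 *		size_t len, off;
 *		char *buf;
 *
 *		mib[2] = getpid();
 *		if (sysctl(mib, 4, NULL, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.proc_args (size)");
 *		if ((buf = malloc(len)) == NULL)
 *			err(1, "malloc");
 *		if (sysctl(mib, 4, buf, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.proc_args");
 *		for (off = 0; off < len; off += strlen(buf + off) + 1)
 *			printf("%s\n", buf + off);
 *		free(buf);
 *		return 0;
 *	}
 */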
2054
2055 #if NPTY > 0
2056 int pty_maxptys(int, int); /* defined in kern/tty_pty.c */
2057
2058 /*
2059 * Validate parameters and get old / set new parameters
2060 * for pty sysctl function.
2061 */
2062 static int
2063 sysctl_pty(void *oldp, size_t *oldlenp, void *newp, size_t newlen)
2064 {
2065 int error = 0;
2066 int oldmax = 0, newmax = 0;
2067
2068 /* get current value of maxptys */
2069 oldmax = pty_maxptys(0, 0);
2070
2071 SYSCTL_SCALAR_CORE_TYP(oldp, oldlenp, &oldmax, int)
2072
2073 if (!error && newp) {
2074 SYSCTL_SCALAR_NEWPCHECK_TYP(newp, newlen, int)
2075 SYSCTL_SCALAR_NEWPCOP_TYP(newp, &newmax, int)
2076
2077 if (newmax != pty_maxptys(newmax, (newp != NULL)))
2078 return (EINVAL);
2079
2080 }
2081
2082 return (error);
2083 }
2084 #endif /* NPTY > 0 */
2085
2086 static int
2087 sysctl_dotkstat(int *name, u_int namelen, void *where, size_t *sizep,
2088     void *newp)
2093 {
2094 /* all sysctl names at this level are terminal */
2095 if (namelen != 1)
2096 return (ENOTDIR); /* overloaded */
2097
2098 switch (name[0]) {
2099 case KERN_TKSTAT_NIN:
2100 return (sysctl_rdquad(where, sizep, newp, tk_nin));
2101 case KERN_TKSTAT_NOUT:
2102 return (sysctl_rdquad(where, sizep, newp, tk_nout));
2103 case KERN_TKSTAT_CANCC:
2104 return (sysctl_rdquad(where, sizep, newp, tk_cancc));
2105 case KERN_TKSTAT_RAWCC:
2106 return (sysctl_rdquad(where, sizep, newp, tk_rawcc));
2107 default:
2108 return (EOPNOTSUPP);
2109 }
2110 }
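
/*
 * Illustrative userland sketch (not part of this file): reading one of
 * the terminal character counters exported above.  Each KERN_TKSTAT
 * node returns a single 64-bit quad.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[3] = { CTL_KERN, KERN_TKSTAT, KERN_TKSTAT_NIN };
 *		quad_t nin;
 *		size_t len = sizeof(nin);
 *
 *		if (sysctl(mib, 3, &nin, &len, NULL, 0) == -1)
 *			err(1, "sysctl kern.tkstat.nin");
 *		printf("%lld characters of terminal input\n",
 *		    (long long)nin);
 *		return 0;
 *	}
 */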
2111