/*	$NetBSD: kern_resource.c,v 1.103.4.5 2006/11/18 21:39:22 ad Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.103.4.5 2006/11/18 21:39:22 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>

#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

struct uihashhead *uihashtbl;
u_long	uihash;		/* size of hash table - 1 */
struct simplelock uihashtbl_slock = SIMPLELOCK_INITIALIZER;


/*
 * Resource controls and accounting.
 */

int
sys_getpriority(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int low = NZERO + PRIO_MAX + 1;
	int who = SCARG(uap, who);

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (who == 0)
			p = curp;
		else
			p = p_find(who, 0);
		if (p != NULL)
			low = p->p_nice;
		if (who != 0)
			rw_exit(&proclist_lock);
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		rw_enter(&proclist_lock, RW_READER);
		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pg_find(who, PFIND_LOCKED | PFIND_UNLOCK_FAIL))
		    == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		rw_exit(&proclist_lock);
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		rw_enter(&proclist_lock, RW_READER);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_mutex);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)who && p->p_nice < low)
				low = p->p_nice;
			mutex_exit(&p->p_mutex);
		}
		rw_exit(&proclist_lock);
		break;

	default:
		return (EINVAL);
	}
	if (low == NZERO + PRIO_MAX + 1)
		return (ESRCH);
	*retval = low - NZERO;
	return (0);
}

/* ARGSUSED */
int
sys_setpriority(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setpriority_args /* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */ *uap = v;
	struct proc *curp = l->l_proc, *p;
	int found = 0, error = 0;
	int who = SCARG(uap, who);

	switch (SCARG(uap, which)) {

	case PRIO_PROCESS:
		if (who == 0)
			p = curp;
		else
			p = p_find(who, 0);
		if (p != 0) {
			mutex_enter(&p->p_mutex);
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(&p->p_mutex);
		}
		if (who != 0)
			rw_exit(&proclist_lock);
		found++;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		rw_enter(&proclist_lock, RW_READER);
		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pg_find(who, PFIND_LOCKED | PFIND_UNLOCK_FAIL)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			mutex_enter(&p->p_mutex);
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(&p->p_mutex);
			found++;
		}
		rw_exit(&proclist_lock);
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		rw_enter(&proclist_lock, RW_READER);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(&p->p_mutex);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)who) {
				error = donice(l, p, SCARG(uap, prio));
				found++;
			}
			mutex_exit(&p->p_mutex);
		}
		rw_exit(&proclist_lock);
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

/*
 * Renice a process.
 *
 * Call with the target process' credentials locked.
 */
int
donice(struct lwp *l, struct proc *chgp, int n)
{
	kauth_cred_t cred = l->l_cred;
	int onice;

	LOCK_ASSERT(mutex_owned(&chgp->p_mutex));

	if (kauth_cred_geteuid(cred) && kauth_cred_getuid(cred) &&
	    kauth_cred_geteuid(cred) != kauth_cred_geteuid(chgp->p_cred) &&
	    kauth_cred_getuid(cred) != kauth_cred_geteuid(chgp->p_cred))
		return (EPERM);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	n += NZERO;

 again:
	if (n < (onice = chgp->p_nice) && kauth_authorize_process(cred,
	    KAUTH_PROCESS_RESOURCE, chgp, (void *)KAUTH_REQ_PROCESS_RESOURCE_NICE,
	    (void *)(u_long)n, NULL))
		return (EACCES);
	mutex_enter(&chgp->p_smutex);
	if (onice != chgp->p_nice) {
		mutex_exit(&chgp->p_smutex);
		goto again;
	}
	chgp->p_nice = n;
	(void)resetprocpriority(chgp);
	mutex_exit(&chgp->p_smutex);
	return (0);
}
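
/*
 * Example of the mapping above, with illustrative numbers: userland deals
 * in nice values in [PRIO_MIN, PRIO_MAX], while the kernel stores the
 * value biased by NZERO in p_nice.  For instance,
 *
 *	setpriority(PRIO_PROCESS, 0, 10);
 *
 * arrives here with n = 10, is clamped to [PRIO_MIN, PRIO_MAX] and stored
 * as p_nice = 10 + NZERO.  sys_getpriority() removes the bias again
 * ("low - NZERO") before returning the value to userland.
 */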

/* ARGSUSED */
int
sys_setrlimit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_setrlimit_args /* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */ *uap = v;
	int which = SCARG(uap, which);
	struct rlimit alim;
	int error;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error)
		return (error);
	return (dosetrlimit(l, l->l_proc, which, &alim));
}

int
dosetrlimit(struct lwp *l, struct proc *p, int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	struct plimit *oldplim;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);

	if (limp->rlim_cur < 0 || limp->rlim_max < 0)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return (EINVAL);
	}
	if (limp->rlim_max > alimp->rlim_max && (error =
	    kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RESOURCE,
	    p, (void *)KAUTH_REQ_PROCESS_RESOURCE_RLIMIT, limp,
	    (void *)(u_long)which)))
		return (error);

	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit = limcopy(oldplim = p->p_limit);
		limfree(oldplim);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it tried to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (limp->rlim_cur < p->p_vmspace->vm_ssize * PAGE_SIZE
		    || limp->rlim_max < p->p_vmspace->vm_ssize * PAGE_SIZE)
			return (EINVAL);

		/*
		 * The stack is allocated to the maximum size at exec time,
		 * with only "rlim_cur" bytes of it accessible (in other
		 * words, the allocation is split into two contiguous
		 * regions at the "rlim_cur" byte boundary).
		 *
		 * Since the allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise the two regions would overlap).
		 * If the stack limit is going up, make more of the stack
		 * accessible; if it is going down, make it inaccessible.
		 */
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_READ | VM_PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
				    alimp->rlim_cur;
			}
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;
	}
	*alimp = *limp;
	return (0);
}
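
/*
 * Illustrative userland use of the code above: raising the soft stack
 * limit makes more of the already-reserved stack region accessible,
 * rounded to a page boundary by dosetrlimit().  For example, to raise
 * the soft limit to the hard limit:
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_STACK, &rl) == 0) {
 *		rl.rlim_cur = rl.rlim_max;
 *		(void)setrlimit(RLIMIT_STACK, &rl);
 *	}
 */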

/* ARGSUSED */
int
sys_getrlimit(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getrlimit_args /* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);

	if ((u_int)which >= RLIM_NLIMITS)
		return (EINVAL);
	return (copyout(&p->p_rlimit[which], SCARG(uap, rlp),
	    sizeof(struct rlimit)));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp,
    struct timeval *ip, struct timeval *rp)
{
	u_quad_t u, st, ut, it, tot;
	unsigned long sec;
	long usec;
	int s;
	struct timeval tv;
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	s = splstatclock();
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	splx(s);

	sec = 0;
	usec = 0;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		sec += l->l_rtime.tv_sec;
		usec += l->l_rtime.tv_usec;
		if (l->l_stat == LSONPROC) {
			struct schedstate_percpu *spc;

			KDASSERT(l->l_cpu != NULL);
			spc = &l->l_cpu->ci_schedstate;

			/*
			 * Adjust for the current time slice.  This is
			 * actually fairly important since the error
			 * here is on the order of a time quantum,
			 * which is much greater than the sampling
			 * error.
			 */
			microtime(&tv);
			sec += tv.tv_sec - spc->spc_runtime.tv_sec;
			usec += tv.tv_usec - spc->spc_runtime.tv_usec;
		}
		lwp_unlock(l);
	}

	tot = st + ut + it;
	u = sec * 1000000ull + usec;

	if (tot == 0) {
		/* No ticks, so we cannot apportion the time; split 50-50. */
		st = ut = u / 2;
	} else {
		st = (u * st) / tot;
		ut = (u * ut) / tot;
	}
	if (sp != NULL) {
		sp->tv_sec = st / 1000000;
		sp->tv_usec = st % 1000000;
	}
	if (up != NULL) {
		up->tv_sec = ut / 1000000;
		up->tv_usec = ut % 1000000;
	}
	if (ip != NULL) {
		if (it != 0)
			it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
	if (rp != NULL) {
		rp->tv_sec = sec;
		rp->tv_usec = usec;
	}
}
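
/*
 * Worked example of the apportioning above, with made-up numbers: if the
 * LWPs have accumulated u = 3,000,000 microseconds of run time and the
 * statclock counted ut = 60, st = 30, it = 10 ticks (tot = 100), then
 *
 *	user time      = u * ut / tot = 1,800,000us
 *	system time    = u * st / tot =   900,000us
 *	interrupt time = u * it / tot =   300,000us
 *
 * i.e. the measured run time is split in proportion to the tick counts,
 * which is why the tot == 0 case has to fall back to a 50-50 split.
 */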

/* ARGSUSED */
int
sys_getrusage(struct lwp *l, void *v, register_t *retval)
{
	struct sys_getrusage_args /* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */ *uap = v;
	struct rusage *rup;
	struct proc *p = l->l_proc;

	switch (SCARG(uap, who)) {

	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		mutex_enter(&p->p_smutex);
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL, NULL);
		mutex_exit(&p->p_smutex);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout(rup, SCARG(uap, rusage), sizeof(struct rusage)));
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}
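
/*
 * Note on the loop above: ru_first and ru_last are markers defined in
 * <sys/resource.h> that bracket the block of "long" counters in struct
 * rusage (ru_ixrss ... ru_nivcsw), so the pointer walk adds every counter
 * field of ru2 into ru without naming each one.  A hand-written
 * equivalent would be, roughly:
 *
 *	ru->ru_ixrss += ru2->ru_ixrss;
 *	ru->ru_idrss += ru2->ru_idrss;
 *	...
 *	ru->ru_nivcsw += ru2->ru_nivcsw;
 */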

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
struct plimit *
limcopy(struct plimit *lim)
{
	struct plimit *newlim;
	size_t l = 0;

	simple_lock(&lim->p_slock);
	if (lim->pl_corename != defcorename)
		l = strlen(lim->pl_corename) + 1;
	simple_unlock(&lim->p_slock);

	newlim = pool_get(&plimit_pool, PR_WAITOK);
	simple_lock_init(&newlim->p_slock);
	newlim->p_lflags = 0;
	newlim->p_refcnt = 1;
	newlim->pl_corename = (l != 0)
		? malloc(l, M_TEMP, M_WAITOK)
		: defcorename;

	simple_lock(&lim->p_slock);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);

	if (l != 0)
		strlcpy(newlim->pl_corename, lim->pl_corename, l);
	simple_unlock(&lim->p_slock);

	return (newlim);
}
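
/*
 * Callers that want to modify a possibly shared plimit for a process p
 * follow the copy-on-write pattern sketched here (compare dosetrlimit()
 * and sysctl_proc_corename()):
 *
 *	struct plimit *lim = p->p_limit;
 *
 *	if (lim->p_refcnt > 1 && (lim->p_lflags & PL_SHAREMOD) == 0) {
 *		p->p_limit = limcopy(lim);
 *		limfree(lim);
 *		lim = p->p_limit;
 *	}
 *	...modify lim...
 */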

void
limfree(struct plimit *lim)
{
	int n;

	simple_lock(&lim->p_slock);
	n = --lim->p_refcnt;
	simple_unlock(&lim->p_slock);
	if (n > 0)
		return;
#ifdef DIAGNOSTIC
	if (n < 0)
		panic("limfree");
#endif
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	pool_put(&plimit_pool, lim);
}

struct pstats *
pstatscopy(struct pstats *ps)
{

	struct pstats *newps;

	newps = pool_get(&pstats_pool, PR_WAITOK);

	memset(&newps->pstat_startzero, 0,
	    (unsigned) ((caddr_t)&newps->pstat_endzero -
	    (caddr_t)&newps->pstat_startzero));
	memcpy(&newps->pstat_startcopy, &ps->pstat_startcopy,
	    ((caddr_t)&newps->pstat_endcopy -
	    (caddr_t)&newps->pstat_startcopy));

	return (newps);

}

void
pstatsfree(struct pstats *ps)
{

	pool_put(&pstats_pool, ps);
}

/*
 * sysctl interface in five parts
 */

/*
 * a routine for sysctl proc subtree helpers that need to pick a valid
 * process by pid.
 */
static int
sysctl_proc_findproc(struct lwp *l, struct proc **p2, pid_t pid)
{
	struct proc *ptmp;
	int error = 0;

	if (pid == PROC_CURPROC)
		ptmp = l->l_proc;
	else if ((ptmp = pfind(pid)) == NULL)
		error = ESRCH;
	else {
		boolean_t isroot = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, NULL);
		/*
		 * suid proc of ours or proc not ours
		 */
		if (kauth_cred_getuid(l->l_cred) !=
		    kauth_cred_getuid(ptmp->p_cred) ||
		    kauth_cred_getuid(l->l_cred) !=
		    kauth_cred_getsvuid(ptmp->p_cred))
			error = isroot ? 0 : EPERM;

		/*
		 * sgid proc has sgid back to us temporarily
		 */
		else if (kauth_cred_getgid(ptmp->p_cred) !=
		    kauth_cred_getsvgid(ptmp->p_cred))
			error = isroot ? 0 : EPERM;

		/*
		 * our rgid must be in target's group list (ie,
		 * sub-processes started by a sgid process)
		 */
		else {
			int ismember = 0;

			if (kauth_cred_ismember_gid(l->l_cred,
			    kauth_cred_getgid(ptmp->p_cred), &ismember) != 0 ||
			    !ismember) {
				error = isroot ? 0 : EPERM;
			}
		}
	}

	*p2 = ptmp;
	return (error);
}

/*
 * sysctl helper routine for setting a process's specific corefile
 * name.  picks the process based on the given pid and checks the
 * correctness of the new value.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	struct plimit *lim;
	int error = 0, len;
	char *cname;
	char *tmp;
	struct sysctlnode node;

	/*
	 * is this all correct?
	 */
	if (namelen != 0)
		return (EINVAL);
	if (name[-1] != PROC_PID_CORENAME)
		return (EINVAL);

	/*
	 * whom are we tweaking?
	 */
	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	cname = PNBUF_GET();
	/*
	 * let them modify a temporary copy of the core name
	 */
	node = *rnode;
	strlcpy(cname, ptmp->p_limit->pl_corename, MAXPATHLEN);
	node.sysctl_data = cname;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/*
	 * if that failed, or they have nothing new to say, or we've
	 * heard it before...
	 */
	if (error || newp == NULL ||
	    strcmp(cname, ptmp->p_limit->pl_corename) == 0) {
		goto done;
	}

	if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CORENAME,
	    l->l_proc, NULL, NULL, NULL) != 0) {
		error = EPERM;
		goto done;
	}

	/*
	 * no error yet and cname now has the new core name in it.
	 * let's see if it looks acceptable.  it must be either "core"
	 * or end in ".core" or "/core".
	 */
	len = strlen(cname);
	if (len < 4) {
		error = EINVAL;
	} else if (strcmp(cname + len - 4, "core") != 0) {
		error = EINVAL;
	} else if (len > 4 && cname[len - 5] != '/' && cname[len - 5] != '.') {
		error = EINVAL;
	}
	if (error != 0) {
		goto done;
	}

	/*
	 * hmm...looks good.  now...where do we put it?
	 */
	tmp = malloc(len + 1, M_TEMP, M_WAITOK|M_CANFAIL);
	if (tmp == NULL) {
		error = ENOMEM;
		goto done;
	}
	strlcpy(tmp, cname, len + 1);

	lim = ptmp->p_limit;
	if (lim->p_refcnt > 1 && (lim->p_lflags & PL_SHAREMOD) == 0) {
		ptmp->p_limit = limcopy(lim);
		limfree(lim);
		lim = ptmp->p_limit;
	}
	if (lim->pl_corename != defcorename)
		free(lim->pl_corename, M_TEMP);
	lim->pl_corename = tmp;
done:
	PNBUF_PUT(cname);
	return error;
}
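
/*
 * The check above accepts, for example, "core", "netbsd.core" and
 * "/var/crash/%n.core", but rejects names such as "mycore" or "corefile".
 * From userland the per-process value would typically be changed with
 * sysctl(8), e.g.:
 *
 *	sysctl -w proc.curproc.corename=/var/tmp/%n.core
 */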

/*
 * sysctl helper routine for checking/setting a process's stop flags,
 * one each for fork, exec, and exit.
 */
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	int i, f, error = 0;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
	if (error)
		return (error);

	switch (rnode->sysctl_num) {
	case PROC_PID_STOPFORK:
		f = PS_STOPFORK;
		break;
	case PROC_PID_STOPEXEC:
		f = PS_STOPEXEC;
		break;
	case PROC_PID_STOPEXIT:
		f = PS_STOPEXIT;
		break;
	default:
		return (EINVAL);
	}

	i = (ptmp->p_sflag & f) ? 1 : 0;
	node = *rnode;
	node.sysctl_data = &i;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	mutex_enter(&ptmp->p_smutex);
	if (i)
		ptmp->p_sflag |= f;
	else
		ptmp->p_sflag &= ~f;
	mutex_exit(&ptmp->p_smutex);

	return (0);
}

/*
 * sysctl helper routine for a process's rlimits as exposed by sysctl.
 */
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
	struct proc *ptmp;
	u_int limitno;
	int which, error = 0;
	struct rlimit alim;
	struct sysctlnode node;

	if (namelen != 0)
		return (EINVAL);

	which = name[-1];
	if (which != PROC_PID_LIMIT_TYPE_SOFT &&
	    which != PROC_PID_LIMIT_TYPE_HARD)
		return (EINVAL);

	limitno = name[-2] - 1;
	if (limitno >= RLIM_NLIMITS)
		return (EINVAL);

	if (name[-3] != PROC_PID_LIMIT)
		return (EINVAL);

	error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-4]);
	if (error)
		return (error);

	node = *rnode;
	memcpy(&alim, &ptmp->p_rlimit[limitno], sizeof(alim));
	if (which == PROC_PID_LIMIT_TYPE_HARD)
		node.sysctl_data = &alim.rlim_max;
	else
		node.sysctl_data = &alim.rlim_cur;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	return (dosetrlimit(l, ptmp, limitno, &alim));
}

/*
 * and finally, the actual glue that sticks it to the tree
 */
SYSCTL_SETUP(sysctl_proc_setup, "sysctl proc subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "proc", NULL,
		       NULL, 0, NULL, 0,
		       CTL_PROC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER,
		       CTLTYPE_NODE, "curproc",
		       SYSCTL_DESCR("Per-process settings"),
		       NULL, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
		       CTLTYPE_STRING, "corename",
		       SYSCTL_DESCR("Core file name"),
		       sysctl_proc_corename, 0, NULL, MAXPATHLEN,
		       CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "rlimit",
		       SYSCTL_DESCR("Process limits"),
		       NULL, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do {					\
	sysctl_createv(clog, 0, NULL, NULL,				\
		       CTLFLAG_PERMANENT,				\
		       CTLTYPE_NODE, s,					\
		       SYSCTL_DESCR("Process " s " limits"),		\
		       NULL, 0, NULL, 0,				\
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,	\
		       CTL_EOL);					\
	sysctl_createv(clog, 0, NULL, NULL,				\
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE, \
		       CTLTYPE_QUAD, "soft",				\
		       SYSCTL_DESCR("Process soft " s " limit"),	\
		       sysctl_proc_plimit, 0, NULL, 0,			\
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,	\
		       PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL);		\
	sysctl_createv(clog, 0, NULL, NULL,				\
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE, \
		       CTLTYPE_QUAD, "hard",				\
		       SYSCTL_DESCR("Process hard " s " limit"),	\
		       sysctl_proc_plimit, 0, NULL, 0,			\
		       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,	\
		       PROC_PID_LIMIT_TYPE_HARD, CTL_EOL);		\
	} while (0/*CONSTCOND*/)

	create_proc_plimit("cputime",		PROC_PID_LIMIT_CPU);
	create_proc_plimit("filesize",		PROC_PID_LIMIT_FSIZE);
	create_proc_plimit("datasize",		PROC_PID_LIMIT_DATA);
	create_proc_plimit("stacksize",		PROC_PID_LIMIT_STACK);
	create_proc_plimit("coredumpsize",	PROC_PID_LIMIT_CORE);
	create_proc_plimit("memoryuse",		PROC_PID_LIMIT_RSS);
	create_proc_plimit("memorylocked",	PROC_PID_LIMIT_MEMLOCK);
	create_proc_plimit("maxproc",		PROC_PID_LIMIT_NPROC);
	create_proc_plimit("descriptors",	PROC_PID_LIMIT_NOFILE);
	create_proc_plimit("sbsize",		PROC_PID_LIMIT_SBSIZE);

#undef create_proc_plimit

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
		       CTLTYPE_INT, "stopfork",
		       SYSCTL_DESCR("Stop process at fork(2)"),
		       sysctl_proc_stop, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
		       CTLTYPE_INT, "stopexec",
		       SYSCTL_DESCR("Stop process at execve(2)"),
		       sysctl_proc_stop, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
		       CTLTYPE_INT, "stopexit",
		       SYSCTL_DESCR("Stop process before completing exit"),
		       sysctl_proc_stop, 0, NULL, 0,
		       CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}
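
/*
 * The nodes created above live under "proc.curproc" (or a specific pid in
 * place of "curproc") and can be inspected or changed with sysctl(8), for
 * example:
 *
 *	sysctl proc.curproc.rlimit.descriptors.soft
 *	sysctl -w proc.curproc.rlimit.descriptors.soft=1024
 *	sysctl -w proc.curproc.stopexec=1
 *
 * Writes to the rlimit nodes are funnelled through sysctl_proc_plimit()
 * and dosetrlimit(), so the usual limit checks apply.
 */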

struct uidinfo *
uid_find(uid_t uid)
{
	struct uidinfo *uip;
	struct uidinfo *newuip = NULL;
	struct uihashhead *uipp;

	uipp = UIHASH(uid);

 again:
	simple_lock(&uihashtbl_slock);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid) {
			simple_unlock(&uihashtbl_slock);
			if (newuip)
				free(newuip, M_PROC);
			return uip;
		}

	if (newuip == NULL) {
		simple_unlock(&uihashtbl_slock);
		newuip = malloc(sizeof(*uip), M_PROC, M_WAITOK | M_ZERO);
		goto again;
	}
	uip = newuip;

	LIST_INSERT_HEAD(uipp, uip, ui_hash);
	uip->ui_uid = uid;
	simple_lock_init(&uip->ui_slock);
	simple_unlock(&uihashtbl_slock);

	return uip;
}
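
/*
 * The retry above is the usual "drop the lock to allocate" pattern: the
 * hash chain is searched under uihashtbl_slock; if the uid is missing,
 * the lock is released, a new record is allocated with M_WAITOK (which
 * may sleep), and the search is repeated in case another thread inserted
 * the same uid in the meantime.  Only if it is still missing is the new
 * record inserted; otherwise the allocation is freed and the existing
 * entry returned.
 */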

/*
 * Change the count associated with number of processes
 * a given user is using.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	int s;

	if (diff == 0)
		return 0;

	uip = uid_find(uid);
	UILOCK(uip, s);
	uip->ui_proccnt += diff;
	KASSERT(uip->ui_proccnt >= 0);
	UIUNLOCK(uip, s);
	return uip->ui_proccnt;
}

int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t xmax)
{
	rlim_t nsb;
	int s;

	UILOCK(uip, s);
	nsb = uip->ui_sbsize + to - *hiwat;
	if (to > *hiwat && nsb > xmax) {
		UIUNLOCK(uip, s);
		return 0;
	}
	*hiwat = to;
	uip->ui_sbsize = nsb;
	KASSERT(uip->ui_sbsize >= 0);
	UIUNLOCK(uip, s);
	return 1;
}
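
/*
 * Example of the accounting above, with illustrative numbers: if a socket
 * buffer's high-water mark grows from *hiwat = 16384 to to = 65536, the
 * user's total charge becomes ui_sbsize + 49152.  The request is refused
 * (return 0) only when the mark is being raised and that new total would
 * exceed the sbsize limit xmax; shrinking the mark always succeeds and
 * credits the difference back.
 */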