/*	$NetBSD: kern_resource.c,v 1.98.2.11 2008/03/17 09:15:33 yamt Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.98.2.11 2008/03/17 09:15:33 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/timevar.h>
#include <sys/kauth.h>
#include <sys/atomic.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

static kmutex_t uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;

#define UIHASH(uid)	(&uihashtbl[(uid) & uihash])

static pool_cache_t plimit_cache;
static pool_cache_t pstats_cache;

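/*
 * Initialize the pool caches for plimit and pstats structures and
 * allocate the hash table used by uid_find().
 */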
void
resource_init(void)
{

        plimit_cache = pool_cache_init(sizeof(struct plimit), 0, 0, 0,
            "plimitpl", NULL, IPL_NONE, NULL, NULL, NULL);
        pstats_cache = pool_cache_init(sizeof(struct pstats), 0, 0, 0,
            "pstatspl", NULL, IPL_NONE, NULL, NULL, NULL);
        uihashtbl = hashinit(maxproc / 16, HASH_LIST, M_PROC,
            M_WAITOK, &uihash);
}

/*
 * Resource controls and accounting.
 */

int
sys_getpriority(struct lwp *l, const struct sys_getpriority_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) which;
                syscallarg(id_t) who;
        } */
        struct proc *curp = l->l_proc, *p;
        int low = NZERO + PRIO_MAX + 1;
        int who = SCARG(uap, who);

        mutex_enter(&proclist_lock);
        switch (SCARG(uap, which)) {
        case PRIO_PROCESS:
                if (who == 0)
                        p = curp;
                else
                        p = p_find(who, PFIND_LOCKED);
                if (p != NULL)
                        low = p->p_nice;
                break;

        case PRIO_PGRP: {
                struct pgrp *pg;

                if (who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pg_find(who, PFIND_LOCKED)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        if (p->p_nice < low)
                                low = p->p_nice;
                }
                break;
        }

        case PRIO_USER:
                if (who == 0)
                        who = (int)kauth_cred_geteuid(l->l_cred);
                PROCLIST_FOREACH(p, &allproc) {
                        mutex_enter(&p->p_mutex);
                        if (kauth_cred_geteuid(p->p_cred) ==
                            (uid_t)who && p->p_nice < low)
                                low = p->p_nice;
                        mutex_exit(&p->p_mutex);
                }
                break;

        default:
                mutex_exit(&proclist_lock);
                return (EINVAL);
        }
        mutex_exit(&proclist_lock);

        if (low == NZERO + PRIO_MAX + 1)
                return (ESRCH);
        *retval = low - NZERO;
        return (0);
}

/* ARGSUSED */
int
sys_setpriority(struct lwp *l, const struct sys_setpriority_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) which;
                syscallarg(id_t) who;
                syscallarg(int) prio;
        } */
        struct proc *curp = l->l_proc, *p;
        int found = 0, error = 0;
        int who = SCARG(uap, who);

        mutex_enter(&proclist_lock);
        switch (SCARG(uap, which)) {
        case PRIO_PROCESS:
                if (who == 0)
                        p = curp;
                else
                        p = p_find(who, PFIND_LOCKED);
                if (p != 0) {
                        mutex_enter(&p->p_mutex);
                        error = donice(l, p, SCARG(uap, prio));
                        mutex_exit(&p->p_mutex);
                }
                found++;
                break;

        case PRIO_PGRP: {
                struct pgrp *pg;

                if (who == 0)
                        pg = curp->p_pgrp;
                else if ((pg = pg_find(who, PFIND_LOCKED)) == NULL)
                        break;
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        mutex_enter(&p->p_mutex);
                        error = donice(l, p, SCARG(uap, prio));
                        mutex_exit(&p->p_mutex);
                        found++;
                }
                break;
        }

        case PRIO_USER:
                if (who == 0)
                        who = (int)kauth_cred_geteuid(l->l_cred);
                PROCLIST_FOREACH(p, &allproc) {
                        mutex_enter(&p->p_mutex);
                        if (kauth_cred_geteuid(p->p_cred) ==
                            (uid_t)who) {
                                error = donice(l, p, SCARG(uap, prio));
                                found++;
                        }
                        mutex_exit(&p->p_mutex);
                }
                break;

        default:
                error = EINVAL;
                break;
        }
        mutex_exit(&proclist_lock);
        if (found == 0)
                return (ESRCH);
        return (error);
}

/*
 * Renice a process.
 *
 * Call with the target process' credentials locked.
 */
int
donice(struct lwp *l, struct proc *chgp, int n)
{
        kauth_cred_t cred = l->l_cred;
        int onice;

        KASSERT(mutex_owned(&chgp->p_mutex));

        if (n > PRIO_MAX)
                n = PRIO_MAX;
        if (n < PRIO_MIN)
                n = PRIO_MIN;
        n += NZERO;
 again:
        onice = chgp->p_nice;
        if (kauth_authorize_process(cred, KAUTH_PROCESS_NICE, chgp,
            KAUTH_ARG(n), NULL, NULL))
                return (EACCES);
        mutex_spin_enter(&chgp->p_smutex);
        if (onice != chgp->p_nice) {
                mutex_spin_exit(&chgp->p_smutex);
                goto again;
        }
        sched_nice(chgp, n);
        mutex_spin_exit(&chgp->p_smutex);
        return (0);
}

/* ARGSUSED */
int
sys_setrlimit(struct lwp *l, const struct sys_setrlimit_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) which;
                syscallarg(const struct rlimit *) rlp;
        } */
        int which = SCARG(uap, which);
        struct rlimit alim;
        int error;

        error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
        if (error)
                return (error);
        return (dosetrlimit(l, l->l_proc, which, &alim));
}

int
dosetrlimit(struct lwp *l, struct proc *p, int which, struct rlimit *limp)
{
        struct rlimit *alimp;
        int error;

        if ((u_int)which >= RLIM_NLIMITS)
                return (EINVAL);

        if (limp->rlim_cur < 0 || limp->rlim_max < 0)
                return (EINVAL);

        if (limp->rlim_cur > limp->rlim_max) {
                /*
                 * This is a programming error.  According to SUSv2,
                 * we should return an error in this case.
                 */
                return (EINVAL);
        }

        alimp = &p->p_rlimit[which];
        /* if we don't change the value, no need to limcopy() */
        if (limp->rlim_cur == alimp->rlim_cur &&
            limp->rlim_max == alimp->rlim_max)
                return 0;

        error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
            p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_SET), limp, KAUTH_ARG(which));
        if (error)
                return (error);

        lim_privatise(p, false);
        /* p->p_limit is now unchangeable */
        alimp = &p->p_rlimit[which];

        switch (which) {

        case RLIMIT_DATA:
                if (limp->rlim_cur > maxdmap)
                        limp->rlim_cur = maxdmap;
                if (limp->rlim_max > maxdmap)
                        limp->rlim_max = maxdmap;
                break;

        case RLIMIT_STACK:
                if (limp->rlim_cur > maxsmap)
                        limp->rlim_cur = maxsmap;
                if (limp->rlim_max > maxsmap)
                        limp->rlim_max = maxsmap;

                /*
                 * Return EINVAL if the new stack size limit is lower than
                 * current usage.  Otherwise, the process would get SIGSEGV
                 * the moment it tried to access anything on its current
                 * stack.  This conforms to SUSv2.
                 */
                if (limp->rlim_cur < p->p_vmspace->vm_ssize * PAGE_SIZE
                    || limp->rlim_max < p->p_vmspace->vm_ssize * PAGE_SIZE) {
                        return (EINVAL);
                }

                /*
                 * Stack is allocated to the max at exec time with only
                 * "rlim_cur" bytes accessible (in other words, the stack
                 * is divided into two contiguous regions at the
                 * "rlim_cur" byte boundary).
                 *
                 * Since allocation is done in terms of pages, round up
                 * "rlim_cur" (otherwise the contiguous regions would
                 * overlap).  If the stack limit is going up, make more
                 * of the stack accessible; if it is going down, make it
                 * inaccessible.
                 */
                limp->rlim_cur = round_page(limp->rlim_cur);
                if (limp->rlim_cur != alimp->rlim_cur) {
                        vaddr_t addr;
                        vsize_t size;
                        vm_prot_t prot;

                        if (limp->rlim_cur > alimp->rlim_cur) {
                                prot = VM_PROT_READ | VM_PROT_WRITE;
                                size = limp->rlim_cur - alimp->rlim_cur;
                                addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
                                    limp->rlim_cur;
                        } else {
                                prot = VM_PROT_NONE;
                                size = alimp->rlim_cur - limp->rlim_cur;
                                addr = (vaddr_t)p->p_vmspace->vm_minsaddr -
                                    alimp->rlim_cur;
                        }
                        (void) uvm_map_protect(&p->p_vmspace->vm_map,
                            addr, addr+size, prot, false);
                }
                break;

        case RLIMIT_NOFILE:
                if (limp->rlim_cur > maxfiles)
                        limp->rlim_cur = maxfiles;
                if (limp->rlim_max > maxfiles)
                        limp->rlim_max = maxfiles;
                break;

        case RLIMIT_NPROC:
                if (limp->rlim_cur > maxproc)
                        limp->rlim_cur = maxproc;
                if (limp->rlim_max > maxproc)
                        limp->rlim_max = maxproc;
                break;
        }

        mutex_enter(&p->p_limit->pl_lock);
        *alimp = *limp;
        mutex_exit(&p->p_limit->pl_lock);
        return (0);
}

/* ARGSUSED */
int
sys_getrlimit(struct lwp *l, const struct sys_getrlimit_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) which;
                syscallarg(struct rlimit *) rlp;
        } */
        struct proc *p = l->l_proc;
        int which = SCARG(uap, which);
        struct rlimit rl;

        if ((u_int)which >= RLIM_NLIMITS)
                return (EINVAL);

        mutex_enter(&p->p_mutex);
        memcpy(&rl, &p->p_rlimit[which], sizeof(rl));
        mutex_exit(&p->p_mutex);

        return copyout(&rl, SCARG(uap, rlp), sizeof(rl));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 *
 * Should be called with p->p_smutex held unless called from exit1().
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp,
    struct timeval *ip, struct timeval *rp)
{
        uint64_t u, st, ut, it, tot;
        struct lwp *l;
        struct bintime tm;
        struct timeval tv;

        mutex_spin_enter(&p->p_stmutex);
        st = p->p_sticks;
        ut = p->p_uticks;
        it = p->p_iticks;
        mutex_spin_exit(&p->p_stmutex);

        tm = p->p_rtime;

        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                lwp_lock(l);
                bintime_add(&tm, &l->l_rtime);
                if ((l->l_flag & LW_RUNNING) != 0) {
                        struct bintime diff;
                        /*
                         * Adjust for the current time slice.  This is
                         * actually fairly important since the error
                         * here is on the order of a time quantum,
                         * which is much greater than the sampling
                         * error.
                         */
                        binuptime(&diff);
                        bintime_sub(&diff, &l->l_stime);
                        bintime_add(&tm, &diff);
                }
                lwp_unlock(l);
        }

        tot = st + ut + it;
        bintime2timeval(&tm, &tv);
        u = (uint64_t)tv.tv_sec * 1000000ul + tv.tv_usec;

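        /*
         * "u" now holds the total accumulated run time in microseconds.
         * Apportion it between user, system and (optionally) interrupt
         * time in proportion to the statclock tick counts.
         */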
        if (tot == 0) {
                /* No ticks, so we can't apportion the time; split it 50-50 */
                st = ut = u / 2;
        } else {
                st = (u * st) / tot;
                ut = (u * ut) / tot;
        }
        if (sp != NULL) {
                sp->tv_sec = st / 1000000;
                sp->tv_usec = st % 1000000;
        }
        if (up != NULL) {
                up->tv_sec = ut / 1000000;
                up->tv_usec = ut % 1000000;
        }
        if (ip != NULL) {
                if (it != 0)
                        it = (u * it) / tot;
                ip->tv_sec = it / 1000000;
                ip->tv_usec = it % 1000000;
        }
        if (rp != NULL) {
                *rp = tv;
        }
}

/* ARGSUSED */
int
sys_getrusage(struct lwp *l, const struct sys_getrusage_args *uap,
    register_t *retval)
{
        /* {
                syscallarg(int) who;
                syscallarg(struct rusage *) rusage;
        } */
        struct rusage ru;
        struct proc *p = l->l_proc;

        switch (SCARG(uap, who)) {
        case RUSAGE_SELF:
                mutex_enter(&p->p_smutex);
                memcpy(&ru, &p->p_stats->p_ru, sizeof(ru));
                calcru(p, &ru.ru_utime, &ru.ru_stime, NULL, NULL);
                mutex_exit(&p->p_smutex);
                break;

        case RUSAGE_CHILDREN:
                mutex_enter(&p->p_smutex);
                memcpy(&ru, &p->p_stats->p_cru, sizeof(ru));
                mutex_exit(&p->p_smutex);
                break;

        default:
                return EINVAL;
        }

        return copyout(&ru, SCARG(uap, rusage), sizeof(ru));
}

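/*
 * Add the resource usage counts of ru2 into ru: the times are summed,
 * ru_maxrss takes the maximum, and the remaining counters are added.
 */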
void
ruadd(struct rusage *ru, struct rusage *ru2)
{
        long *ip, *ip2;
        int i;

        timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
        timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
        if (ru->ru_maxrss < ru2->ru_maxrss)
                ru->ru_maxrss = ru2->ru_maxrss;
        ip = &ru->ru_first; ip2 = &ru2->ru_first;
        for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
                *ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 *
 * Unfortunately (due to PL_SHAREMOD) it is possible for the structure
 * we are copying to change beneath our feet!
 */
struct plimit *
lim_copy(struct plimit *lim)
{
        struct plimit *newlim;
        char *corename;
        size_t alen, len;

        newlim = pool_cache_get(plimit_cache, PR_WAITOK);
        mutex_init(&newlim->pl_lock, MUTEX_DEFAULT, IPL_NONE);
        newlim->pl_flags = 0;
        newlim->pl_refcnt = 1;
        newlim->pl_sv_limit = NULL;

        mutex_enter(&lim->pl_lock);
        memcpy(newlim->pl_rlimit, lim->pl_rlimit,
            sizeof(struct rlimit) * RLIM_NLIMITS);

        alen = 0;
        corename = NULL;
        for (;;) {
                if (lim->pl_corename == defcorename) {
                        newlim->pl_corename = defcorename;
                        break;
                }
                len = strlen(lim->pl_corename) + 1;
                if (len <= alen) {
                        newlim->pl_corename = corename;
                        memcpy(corename, lim->pl_corename, len);
                        corename = NULL;
                        break;
                }
                mutex_exit(&lim->pl_lock);
                if (corename != NULL)
                        free(corename, M_TEMP);
                alen = len;
                corename = malloc(alen, M_TEMP, M_WAITOK);
                mutex_enter(&lim->pl_lock);
        }
        mutex_exit(&lim->pl_lock);
        if (corename != NULL)
                free(corename, M_TEMP);
        return newlim;
}

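/*
 * Take an additional reference to a plimit structure.
 */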
void
lim_addref(struct plimit *lim)
{
        atomic_inc_uint(&lim->pl_refcnt);
}

/*
 * Give a process its own private plimit structure.
 * This will only be shared (in fork) if modifications are to be shared.
 */
void
lim_privatise(struct proc *p, bool set_shared)
{
        struct plimit *lim, *newlim;

        lim = p->p_limit;
        if (lim->pl_flags & PL_WRITEABLE) {
                if (set_shared)
                        lim->pl_flags |= PL_SHAREMOD;
                return;
        }

        if (set_shared && lim->pl_flags & PL_SHAREMOD)
                return;

        newlim = lim_copy(lim);

        mutex_enter(&p->p_mutex);
        if (p->p_limit->pl_flags & PL_WRITEABLE) {
                /* Someone crept in while we were busy */
                mutex_exit(&p->p_mutex);
                limfree(newlim);
                if (set_shared)
                        p->p_limit->pl_flags |= PL_SHAREMOD;
                return;
        }

        /*
         * Since most accesses to p->p_limit aren't locked, we must not
         * delete the old limit structure yet.
         */
        newlim->pl_sv_limit = p->p_limit;
        newlim->pl_flags |= PL_WRITEABLE;
        if (set_shared)
                newlim->pl_flags |= PL_SHAREMOD;
        p->p_limit = newlim;
        mutex_exit(&p->p_mutex);
}

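/*
 * Drop a reference to a plimit structure.  When the last reference is
 * gone, free it along with any saved predecessors chained through
 * pl_sv_limit.
 */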
void
limfree(struct plimit *lim)
{
        struct plimit *sv_lim;

        do {
                if (atomic_dec_uint_nv(&lim->pl_refcnt) > 0)
                        return;
                if (lim->pl_corename != defcorename)
                        free(lim->pl_corename, M_TEMP);
                sv_lim = lim->pl_sv_limit;
                mutex_destroy(&lim->pl_lock);
                pool_cache_put(plimit_cache, lim);
        } while ((lim = sv_lim) != NULL);
}

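/*
 * Allocate a new pstats structure, zeroing the fields between
 * pstat_startzero and pstat_endzero and copying the fields between
 * pstat_startcopy and pstat_endcopy from "ps".
 */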
struct pstats *
pstatscopy(struct pstats *ps)
{

        struct pstats *newps;

        newps = pool_cache_get(pstats_cache, PR_WAITOK);

        memset(&newps->pstat_startzero, 0,
            (unsigned) ((char *)&newps->pstat_endzero -
                (char *)&newps->pstat_startzero));
        memcpy(&newps->pstat_startcopy, &ps->pstat_startcopy,
            ((char *)&newps->pstat_endcopy -
             (char *)&newps->pstat_startcopy));

        return (newps);

}

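/*
 * Release a pstats structure back to its pool cache.
 */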
void
pstatsfree(struct pstats *ps)
{

        pool_cache_put(pstats_cache, ps);
}

/*
 * sysctl interface in five parts
 */

/*
 * a routine for sysctl proc subtree helpers that need to pick a valid
 * process by pid.
 */
static int
sysctl_proc_findproc(struct lwp *l, struct proc **p2, pid_t pid)
{
        struct proc *ptmp;
        int error = 0;

        if (pid == PROC_CURPROC)
                ptmp = l->l_proc;
        else if ((ptmp = pfind(pid)) == NULL)
                error = ESRCH;

        *p2 = ptmp;
        return (error);
}

/*
 * sysctl helper routine for setting a process's specific corefile
 * name.  picks the process based on the given pid and checks the
 * correctness of the new value.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
        struct proc *ptmp;
        struct plimit *lim;
        int error = 0, len;
        char *cname;
        char *ocore;
        char *tmp;
        struct sysctlnode node;

        /*
         * is this all correct?
         */
        if (namelen != 0)
                return (EINVAL);
        if (name[-1] != PROC_PID_CORENAME)
                return (EINVAL);

        /*
         * whom are we tweaking?
         */
        error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
        if (error)
                return (error);

        /* XXX-elad */
        error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, ptmp,
            KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
        if (error)
                return (error);

        if (newp == NULL) {
                error = kauth_authorize_process(l->l_cred,
                    KAUTH_PROCESS_CORENAME, ptmp,
                    KAUTH_ARG(KAUTH_REQ_PROCESS_CORENAME_GET), NULL, NULL);
                if (error)
                        return (error);
        }

        /*
         * let them modify a temporary copy of the core name
         */
        cname = PNBUF_GET();
        lim = ptmp->p_limit;
        mutex_enter(&lim->pl_lock);
        strlcpy(cname, lim->pl_corename, MAXPATHLEN);
        mutex_exit(&lim->pl_lock);

        node = *rnode;
        node.sysctl_data = cname;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));

        /*
         * if that failed, or they have nothing new to say, or we've
         * heard it before...
         */
        if (error || newp == NULL)
                goto done;
        lim = ptmp->p_limit;
        mutex_enter(&lim->pl_lock);
        error = strcmp(cname, lim->pl_corename);
        mutex_exit(&lim->pl_lock);
        if (error == 0)
                /* Unchanged */
                goto done;

        error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CORENAME,
            ptmp, KAUTH_ARG(KAUTH_REQ_PROCESS_CORENAME_SET), cname, NULL);
        if (error)
                goto done;

        /*
         * no error yet and cname now has the new core name in it.
         * let's see if it looks acceptable.  it must be either "core"
         * or end in ".core" or "/core".
         */
        len = strlen(cname);
        if (len < 4) {
                error = EINVAL;
        } else if (strcmp(cname + len - 4, "core") != 0) {
                error = EINVAL;
        } else if (len > 4 && cname[len - 5] != '/' && cname[len - 5] != '.') {
                error = EINVAL;
        }
        if (error != 0) {
                goto done;
        }

        /*
         * hmm...looks good.  now...where do we put it?
         */
        tmp = malloc(len + 1, M_TEMP, M_WAITOK|M_CANFAIL);
        if (tmp == NULL) {
                error = ENOMEM;
                goto done;
        }
        memcpy(tmp, cname, len + 1);

        lim_privatise(ptmp, false);
        lim = ptmp->p_limit;
        mutex_enter(&lim->pl_lock);
        ocore = lim->pl_corename;
        lim->pl_corename = tmp;
        mutex_exit(&lim->pl_lock);
        if (ocore != defcorename)
                free(ocore, M_TEMP);

done:
        PNBUF_PUT(cname);
        return error;
}

/*
 * sysctl helper routine for checking/setting a process's stop flags,
 * one for fork and one for exec.
 */
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
        struct proc *ptmp;
        int i, f, error = 0;
        struct sysctlnode node;

        if (namelen != 0)
                return (EINVAL);

        error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-2]);
        if (error)
                return (error);

        /* XXX-elad */
        error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, ptmp,
            KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
        if (error)
                return (error);

        switch (rnode->sysctl_num) {
        case PROC_PID_STOPFORK:
                f = PS_STOPFORK;
                break;
        case PROC_PID_STOPEXEC:
                f = PS_STOPEXEC;
                break;
        case PROC_PID_STOPEXIT:
                f = PS_STOPEXIT;
                break;
        default:
                return (EINVAL);
        }

        i = (ptmp->p_sflag & f) ? 1 : 0;
        node = *rnode;
        node.sysctl_data = &i;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return (error);

        mutex_enter(&ptmp->p_smutex);
        error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_STOPFLAG,
            ptmp, KAUTH_ARG(f), NULL, NULL);
        if (error) {
                mutex_exit(&ptmp->p_smutex);
                return (error);
        }
        if (i)
                ptmp->p_sflag |= f;
        else
                ptmp->p_sflag &= ~f;
        mutex_exit(&ptmp->p_smutex);

        return (0);
}

/*
 * sysctl helper routine for a process's rlimits as exposed by sysctl.
 */
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
        struct proc *ptmp;
        u_int limitno;
        int which, error = 0;
        struct rlimit alim;
        struct sysctlnode node;

        if (namelen != 0)
                return (EINVAL);

        which = name[-1];
        if (which != PROC_PID_LIMIT_TYPE_SOFT &&
            which != PROC_PID_LIMIT_TYPE_HARD)
                return (EINVAL);

        limitno = name[-2] - 1;
        if (limitno >= RLIM_NLIMITS)
                return (EINVAL);

        if (name[-3] != PROC_PID_LIMIT)
                return (EINVAL);

        error = sysctl_proc_findproc(l, &ptmp, (pid_t)name[-4]);
        if (error)
                return (error);

        /* XXX-elad */
        error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, ptmp,
            KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
        if (error)
                return (error);

        /* Check if we can view limits. */
        if (newp == NULL) {
                error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
                    ptmp, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_GET), &alim,
                    KAUTH_ARG(which));
                if (error)
                        return (error);
        }

        node = *rnode;
        memcpy(&alim, &ptmp->p_rlimit[limitno], sizeof(alim));
        if (which == PROC_PID_LIMIT_TYPE_HARD)
                node.sysctl_data = &alim.rlim_max;
        else
                node.sysctl_data = &alim.rlim_cur;

        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return (error);

        return (dosetrlimit(l, ptmp, limitno, &alim));
}

/*
 * and finally, the actual glue that sticks it to the tree
 */
SYSCTL_SETUP(sysctl_proc_setup, "sysctl proc subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_NODE, "proc", NULL,
                       NULL, 0, NULL, 0,
                       CTL_PROC, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER,
                       CTLTYPE_NODE, "curproc",
                       SYSCTL_DESCR("Per-process settings"),
                       NULL, 0, NULL, 0,
                       CTL_PROC, PROC_CURPROC, CTL_EOL);

        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
                       CTLTYPE_STRING, "corename",
                       SYSCTL_DESCR("Core file name"),
                       sysctl_proc_corename, 0, NULL, MAXPATHLEN,
                       CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT,
                       CTLTYPE_NODE, "rlimit",
                       SYSCTL_DESCR("Process limits"),
                       NULL, 0, NULL, 0,
                       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do {                                  \
        sysctl_createv(clog, 0, NULL, NULL,                            \
                       CTLFLAG_PERMANENT,                              \
                       CTLTYPE_NODE, s,                                \
                       SYSCTL_DESCR("Process " s " limits"),           \
                       NULL, 0, NULL, 0,                               \
                       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,      \
                       CTL_EOL);                                       \
        sysctl_createv(clog, 0, NULL, NULL,                            \
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE, \
                       CTLTYPE_QUAD, "soft",                           \
                       SYSCTL_DESCR("Process soft " s " limit"),       \
                       sysctl_proc_plimit, 0, NULL, 0,                 \
                       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,      \
                       PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL);             \
        sysctl_createv(clog, 0, NULL, NULL,                            \
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE, \
                       CTLTYPE_QUAD, "hard",                           \
                       SYSCTL_DESCR("Process hard " s " limit"),       \
                       sysctl_proc_plimit, 0, NULL, 0,                 \
                       CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,      \
                       PROC_PID_LIMIT_TYPE_HARD, CTL_EOL);             \
        } while (0/*CONSTCOND*/)

        create_proc_plimit("cputime",           PROC_PID_LIMIT_CPU);
        create_proc_plimit("filesize",          PROC_PID_LIMIT_FSIZE);
        create_proc_plimit("datasize",          PROC_PID_LIMIT_DATA);
        create_proc_plimit("stacksize",         PROC_PID_LIMIT_STACK);
        create_proc_plimit("coredumpsize",      PROC_PID_LIMIT_CORE);
        create_proc_plimit("memoryuse",         PROC_PID_LIMIT_RSS);
        create_proc_plimit("memorylocked",      PROC_PID_LIMIT_MEMLOCK);
        create_proc_plimit("maxproc",           PROC_PID_LIMIT_NPROC);
        create_proc_plimit("descriptors",       PROC_PID_LIMIT_NOFILE);
        create_proc_plimit("sbsize",            PROC_PID_LIMIT_SBSIZE);

#undef create_proc_plimit

        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
                       CTLTYPE_INT, "stopfork",
                       SYSCTL_DESCR("Stop process at fork(2)"),
                       sysctl_proc_stop, 0, NULL, 0,
                       CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
                       CTLTYPE_INT, "stopexec",
                       SYSCTL_DESCR("Stop process at execve(2)"),
                       sysctl_proc_stop, 0, NULL, 0,
                       CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
                       CTLTYPE_INT, "stopexit",
                       SYSCTL_DESCR("Stop process before completing exit"),
                       sysctl_proc_stop, 0, NULL, 0,
                       CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}

void
uid_init(void)
{

        /*
         * XXXSMP This could be at IPL_SOFTNET, but for now we want
         * it to be deadlock free, so it must be at IPL_VM.
         */
        mutex_init(&uihashtbl_lock, MUTEX_DEFAULT, IPL_VM);

        /*
         * Ensure that uid 0 is always in the user hash table, as
         * sbreserve() expects it available from interrupt context.
         */
        (void)uid_find(0);
}

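/*
 * Look up the uidinfo entry for "uid", allocating and inserting a new
 * one if none exists yet.  The allocation may sleep, so this must not
 * be called from interrupt context.
 */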
struct uidinfo *
uid_find(uid_t uid)
{
        struct uidinfo *uip;
        struct uidinfo *newuip = NULL;
        struct uihashhead *uipp;

        uipp = UIHASH(uid);

again:
        mutex_enter(&uihashtbl_lock);
        LIST_FOREACH(uip, uipp, ui_hash)
                if (uip->ui_uid == uid) {
                        mutex_exit(&uihashtbl_lock);
                        if (newuip) {
                                mutex_destroy(&newuip->ui_lock);
                                kmem_free(newuip, sizeof(*newuip));
                        }
                        return uip;
                }
        if (newuip == NULL) {
                mutex_exit(&uihashtbl_lock);
                /* Must not be called from interrupt context. */
                newuip = kmem_zalloc(sizeof(*newuip), KM_SLEEP);
                /* XXX this could be IPL_SOFTNET */
                mutex_init(&newuip->ui_lock, MUTEX_DEFAULT, IPL_VM);
                goto again;
        }
        uip = newuip;

        LIST_INSERT_HEAD(uipp, uip, ui_hash);
        uip->ui_uid = uid;
        mutex_exit(&uihashtbl_lock);

        return uip;
}

/*
 * Change the count associated with number of processes
 * a given user is using.
 */
int
chgproccnt(uid_t uid, int diff)
{
        struct uidinfo *uip;

        uip = uid_find(uid);
        mutex_enter(&uip->ui_lock);
        uip->ui_proccnt += diff;
        KASSERT(uip->ui_proccnt >= 0);
        mutex_exit(&uip->ui_lock);
        return uip->ui_proccnt;
}

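/*
 * Charge a change in socket buffer high-water mark against a user's
 * total.  Returns 0 if the buffer is being grown and the new per-user
 * total would exceed "xmax"; otherwise updates *hiwat and ui_sbsize
 * and returns 1.
 */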
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t xmax)
{
        rlim_t nsb;

        mutex_enter(&uip->ui_lock);
        nsb = uip->ui_sbsize + to - *hiwat;
        if (to > *hiwat && nsb > xmax) {
                mutex_exit(&uip->ui_lock);
                return 0;
        }
        *hiwat = to;
        uip->ui_sbsize = nsb;
        KASSERT(uip->ui_sbsize >= 0);
        mutex_exit(&uip->ui_lock);
        return 1;
}