/*	$NetBSD: kern_resource.c,v 1.197 2026/01/04 01:37:47 riastradh Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.8 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.197 2026/01/04 01:37:47 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/timevar.h>

#include <uvm/uvm_extern.h>

/*
 * Maximum process data and stack limits.
 * They are variables so they are patchable.
 */
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;

static kauth_listener_t	resource_listener;
static struct sysctllog	*proc_sysctllog;

static int	donice(struct lwp *, struct proc *, int);
static void	sysctl_proc_setup(void);

static int
resource_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	switch (action) {
	case KAUTH_PROCESS_NICE:
		if (kauth_cred_geteuid(cred) != kauth_cred_geteuid(p->p_cred) &&
		    kauth_cred_getuid(cred) != kauth_cred_geteuid(p->p_cred)) {
			break;
		}

		if ((u_long)arg1 >= p->p_nice)
			result = KAUTH_RESULT_ALLOW;

		break;

	case KAUTH_PROCESS_RLIMIT: {
		enum kauth_process_req req;

		req = (enum kauth_process_req)(uintptr_t)arg1;

		switch (req) {
		case KAUTH_REQ_PROCESS_RLIMIT_GET:
			result = KAUTH_RESULT_ALLOW;
			break;

		case KAUTH_REQ_PROCESS_RLIMIT_SET: {
			struct rlimit *new_rlimit;
			u_long which;

			if ((p != curlwp->l_proc) &&
			    (proc_uidmatch(cred, p->p_cred) != 0))
				break;

			new_rlimit = arg2;
			which = (u_long)arg3;

			if (new_rlimit->rlim_max <= p->p_rlimit[which].rlim_max)
				result = KAUTH_RESULT_ALLOW;

			break;
		}

		default:
			break;
		}

		break;
	}

	default:
		break;
	}

	return result;
}

void
resource_init(void)
{

	resource_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    resource_listener_cb, NULL);

	sysctl_proc_setup();
}

/*
 * Resource controls and accounting.
 */

int
sys_getpriority(struct lwp *l, const struct sys_getpriority_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(id_t) who;
	} */
	struct proc *curp = l->l_proc, *p;
	id_t who = SCARG(uap, who);
	int low = NZERO + PRIO_MAX + 1;

	mutex_enter(&proc_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		p = who ? proc_find(who) : curp;
		if (p != NULL)
			low = p->p_nice;
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgrp_find(who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(p->p_lock);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)who && p->p_nice < low)
				low = p->p_nice;
			mutex_exit(p->p_lock);
		}
		break;

	default:
		mutex_exit(&proc_lock);
		return SET_ERROR(EINVAL);
	}
	mutex_exit(&proc_lock);

	if (low == NZERO + PRIO_MAX + 1) {
		return SET_ERROR(ESRCH);
	}
	*retval = low - NZERO;
	return 0;
}

int
sys_setpriority(struct lwp *l, const struct sys_setpriority_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(id_t) who;
		syscallarg(int) prio;
	} */
	struct proc *curp = l->l_proc, *p;
	id_t who = SCARG(uap, who);
	int found = 0, error = 0;

	mutex_enter(&proc_lock);
	switch (SCARG(uap, which)) {
	case PRIO_PROCESS:
		p = who ? proc_find(who) : curp;
		if (p != NULL) {
			mutex_enter(p->p_lock);
			found++;
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(p->p_lock);
		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg;

		if (who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgrp_find(who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			mutex_enter(p->p_lock);
			found++;
			error = donice(l, p, SCARG(uap, prio));
			mutex_exit(p->p_lock);
			if (error)
				break;
		}
		break;
	}

	case PRIO_USER:
		if (who == 0)
			who = (int)kauth_cred_geteuid(l->l_cred);
		PROCLIST_FOREACH(p, &allproc) {
			mutex_enter(p->p_lock);
			if (kauth_cred_geteuid(p->p_cred) ==
			    (uid_t)SCARG(uap, who)) {
				found++;
				error = donice(l, p, SCARG(uap, prio));
			}
			mutex_exit(p->p_lock);
			if (error)
				break;
		}
		break;

	default:
		mutex_exit(&proc_lock);
		return SET_ERROR(EINVAL);
	}
	mutex_exit(&proc_lock);

	return (found == 0) ? SET_ERROR(ESRCH) : error;
}

/*
 * Renice a process.
 *
 * Call with the target process' credentials locked.
 */
static int
donice(struct lwp *l, struct proc *chgp, int n)
{
	kauth_cred_t cred = l->l_cred;

	KASSERT(mutex_owned(chgp->p_lock));

	if (kauth_cred_geteuid(cred) && kauth_cred_getuid(cred) &&
	    kauth_cred_geteuid(cred) != kauth_cred_geteuid(chgp->p_cred) &&
	    kauth_cred_getuid(cred) != kauth_cred_geteuid(chgp->p_cred))
		return SET_ERROR(EPERM);

	if (n > PRIO_MAX) {
		n = PRIO_MAX;
	}
	if (n < PRIO_MIN) {
		n = PRIO_MIN;
	}
	n += NZERO;

	if (kauth_authorize_process(cred, KAUTH_PROCESS_NICE, chgp,
	    KAUTH_ARG(n), NULL, NULL)) {
		return SET_ERROR(EACCES);
	}

	sched_nice(chgp, n);
	return 0;
}

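/*
 * Resource limit syscalls.
 *
 * A minimal sketch of the userland view, for reference only (not part
 * of this file):
 *
 *	struct rlimit rl;
 *	getrlimit(RLIMIT_NOFILE, &rl);
 *	rl.rlim_cur = rl.rlim_max;	// raise soft limit up to hard limit
 *	setrlimit(RLIMIT_NOFILE, &rl);
 *
 * Requests that do not increase rlim_max are allowed by the kauth(9)
 * listener above (resource_listener_cb()); raising the hard limit is
 * deferred to other listeners, i.e. it normally requires privilege.
 */
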
int
sys_setrlimit(struct lwp *l, const struct sys_setrlimit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(const struct rlimit *) rlp;
	} */
	int error, which = SCARG(uap, which);
	struct rlimit alim;

	error = copyin(SCARG(uap, rlp), &alim, sizeof(struct rlimit));
	if (error) {
		return error;
	}
	return dosetrlimit(l, l->l_proc, which, &alim);
}

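/*
 * dosetrlimit: set resource limit "which" of process p to *limp.
 *
 * Validates the new values, asks kauth(9) for permission, clamps them
 * to the system-wide maxima, gives the process a private plimit if it
 * still shares one, and for RLIMIT_STACK adjusts the protection of the
 * already-reserved stack region to match the new soft limit.
 */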
int
dosetrlimit(struct lwp *l, struct proc *p, int which, struct rlimit *limp)
{
	struct rlimit *alimp;
	int error;

	if ((u_int)which >= RLIM_NLIMITS)
		return SET_ERROR(EINVAL);

	if (limp->rlim_cur > limp->rlim_max) {
		/*
		 * This is a programming error.  According to SUSv2, we
		 * should return an error in this case.
		 */
		return SET_ERROR(EINVAL);
	}

	alimp = &p->p_rlimit[which];
	/* if we don't change the value, no need to limcopy() */
	if (limp->rlim_cur == alimp->rlim_cur &&
	    limp->rlim_max == alimp->rlim_max)
		return 0;

	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
	    p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_SET), limp, KAUTH_ARG(which));
	if (error)
		return error;

	lim_privatise(p);
	/* p->p_limit is now unchangeable */
	alimp = &p->p_rlimit[which];

	switch (which) {

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxsmap)
			limp->rlim_cur = maxsmap;
		if (limp->rlim_max > maxsmap)
			limp->rlim_max = maxsmap;

		/*
		 * Return EINVAL if the new stack size limit is lower than
		 * current usage.  Otherwise, the process would get SIGSEGV
		 * the moment it tried to access anything on its current
		 * stack.  This conforms to SUSv2.
		 */
		if (btoc(limp->rlim_cur) < p->p_vmspace->vm_ssize ||
		    btoc(limp->rlim_max) < p->p_vmspace->vm_ssize) {
			return SET_ERROR(EINVAL);
		}

		/*
		 * The stack is allocated to the maximum size at exec time,
		 * with only "rlim_cur" bytes accessible (in other words, the
		 * reservation is divided into two contiguous regions at the
		 * "rlim_cur" byte boundary).
		 *
		 * Since allocation is done in terms of pages, round up
		 * "rlim_cur" (otherwise the two regions would overlap).
		 * If the stack limit is going up, make more of the stack
		 * accessible; if it is going down, make the excess
		 * inaccessible.
		 */
		limp->rlim_max = round_page(limp->rlim_max);
		limp->rlim_cur = round_page(limp->rlim_cur);
		if (limp->rlim_cur != alimp->rlim_cur) {
			vaddr_t addr;
			vsize_t size;
			vm_prot_t prot;
			char *base, *tmp;

			base = p->p_vmspace->vm_minsaddr;
			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_READ | VM_PROT_WRITE;
				size = limp->rlim_cur - alimp->rlim_cur;
				tmp = STACK_GROW(base, alimp->rlim_cur);
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				tmp = STACK_GROW(base, limp->rlim_cur);
			}
			addr = (vaddr_t)STACK_ALLOC(tmp, size);
			(void) uvm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, false);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfiles)
			limp->rlim_cur = maxfiles;
		if (limp->rlim_max > maxfiles)
			limp->rlim_max = maxfiles;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxproc)
			limp->rlim_cur = maxproc;
		if (limp->rlim_max > maxproc)
			limp->rlim_max = maxproc;
		break;

	case RLIMIT_NTHR:
		if (limp->rlim_cur > maxlwp)
			limp->rlim_cur = maxlwp;
		if (limp->rlim_max > maxlwp)
			limp->rlim_max = maxlwp;
		break;
	}

	mutex_enter(&p->p_limit->pl_lock);
	*alimp = *limp;
	mutex_exit(&p->p_limit->pl_lock);
	return 0;
}

int
sys_getrlimit(struct lwp *l, const struct sys_getrlimit_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) which;
		syscallarg(struct rlimit *) rlp;
	} */
	struct proc *p = l->l_proc;
	int which = SCARG(uap, which);
	struct rlimit rl;

	if ((u_int)which >= RLIM_NLIMITS)
		return SET_ERROR(EINVAL);

	mutex_enter(p->p_lock);
	memcpy(&rl, &p->p_rlimit[which], sizeof(rl));
	mutex_exit(p->p_lock);

	return copyout(&rl, SCARG(uap, rlp), sizeof(rl));
}

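/*
 * addrulwp: add the accumulated run time of LWP l to *tm.  If the LWP
 * is currently running, also include the time spent in the current
 * time slice, which is not yet reflected in l_rtime.
 */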
void
addrulwp(struct lwp *l, struct bintime *tm)
{

	lwp_lock(l);
	bintime_add(tm, &l->l_rtime);
	if ((l->l_pflag & LP_RUNNING) != 0 &&
	    (l->l_pflag & (LP_INTR | LP_TIMEINTR)) != LP_INTR) {
		struct bintime diff;
		/*
		 * Adjust for the current time slice.  This is
		 * actually fairly important since the error
		 * here is on the order of a time quantum,
		 * which is much greater than the sampling
		 * error.
		 */
		binuptime(&diff);
		membar_consumer();	/* for softint_dispatch() */
		bintime_sub(&diff, &l->l_stime);
		bintime_add(tm, &diff);
	}
	lwp_unlock(l);
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 *
 * Should be called with p->p_lock held unless called from exit1().
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp,
    struct timeval *ip, struct timeval *rp)
{
	uint64_t u, st, ut, it, tot, dt;
	struct lwp *l;
	struct bintime tm;
	struct timeval tv;

	KASSERT(p->p_stat == SDEAD || mutex_owned(p->p_lock));

	mutex_spin_enter(&p->p_stmutex);
	st = p->p_sticks;
	ut = p->p_uticks;
	it = p->p_iticks;
	mutex_spin_exit(&p->p_stmutex);

	tm = p->p_rtime;

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		addrulwp(l, &tm);
	}

	tot = st + ut + it;
	bintime2timeval(&tm, &tv);
	u = (uint64_t)tv.tv_sec * 1000000ul + tv.tv_usec;

	if (tot == 0) {
		/* No ticks, so we cannot apportion the time; split 50-50. */
		st = ut = u / 2;
	} else {
		st = (u * st) / tot;
		ut = (u * ut) / tot;
	}

	/*
	 * Try to avoid lying to the users (too much).
	 *
	 * Of course, user/sys time are based on sampling (i.e. statistics),
	 * so that would be impossible, but convincing the mark that we have
	 * used less user/sys time this call than we reported last time is
	 * beyond reasonable... (the con fails!)
	 *
	 * Note that since actual used time cannot decrease, either
	 * utime or stime (or both) must be greater now than last time
	 * (or both the same) - if one seems to have decreased, hold
	 * it constant and steal the necessary bump from the other,
	 * which must have increased.
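	 *
	 * For example, if the previous call reported ut = 5000us and
	 * st = 3000us, while the scaling above yields ut = 4800us and
	 * st = 3100us this time, report ut = 5000us and st = 2900us:
	 * neither value decreases and their sum still matches the
	 * measured run time.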
	 */
	if (p->p_xutime > ut) {
		dt = p->p_xutime - ut;
		st -= uimin(dt, st);
		ut = p->p_xutime;
	} else if (p->p_xstime > st) {
		dt = p->p_xstime - st;
		ut -= uimin(dt, ut);
		st = p->p_xstime;
	}

	if (sp != NULL) {
		p->p_xstime = st;
		sp->tv_sec = st / 1000000;
		sp->tv_usec = st % 1000000;
	}
	if (up != NULL) {
		p->p_xutime = ut;
		up->tv_sec = ut / 1000000;
		up->tv_usec = ut % 1000000;
	}
	if (ip != NULL) {
		if (it != 0)	/* it != 0 --> tot != 0 */
			it = (u * it) / tot;
		ip->tv_sec = it / 1000000;
		ip->tv_usec = it % 1000000;
	}
	if (rp != NULL) {
		*rp = tv;
	}
}

int
sys___getrusage50(struct lwp *l, const struct sys___getrusage50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) who;
		syscallarg(struct rusage *) rusage;
	} */
	int error;
	struct rusage ru;
	struct proc *p = l->l_proc;

	error = getrusage1(p, SCARG(uap, who), &ru);
	if (error != 0)
		return error;

	return copyout(&ru, SCARG(uap, rusage), sizeof(ru));
}

int
getrusage1(struct proc *p, int who, struct rusage *ru)
{

	switch (who) {
	case RUSAGE_SELF:
		mutex_enter(p->p_lock);
		ruspace(p);
		memcpy(ru, &p->p_stats->p_ru, sizeof(*ru));
		calcru(p, &ru->ru_utime, &ru->ru_stime, NULL, NULL);
		rulwps(p, ru);
		mutex_exit(p->p_lock);
		break;
	case RUSAGE_CHILDREN:
		mutex_enter(p->p_lock);
		memcpy(ru, &p->p_stats->p_cru, sizeof(*ru));
		mutex_exit(p->p_lock);
		break;
	default:
		return SET_ERROR(EINVAL);
	}

	return 0;
}

void
ruspace(struct proc *p)
{
	struct vmspace *vm = p->p_vmspace;
	struct rusage *ru = &p->p_stats->p_ru;

	ru->ru_ixrss = vm->vm_tsize << (PAGE_SHIFT - 10);
	ru->ru_idrss = vm->vm_dsize << (PAGE_SHIFT - 10);
	ru->ru_isrss = vm->vm_ssize << (PAGE_SHIFT - 10);
#ifdef __HAVE_NO_PMAP_STATS
	/* We don't keep track of the max, so report the current value. */
	ru->ru_maxrss = vm_resident_count(vm) << (PAGE_SHIFT - 10);
#else
	ru->ru_maxrss = vm->vm_rssmax << (PAGE_SHIFT - 10);
#endif
}

void
ruadd(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
rulwps(proc_t *p, struct rusage *ru)
{
	lwp_t *l;

	KASSERT(mutex_owned(p->p_lock));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		ruadd(ru, &l->l_ru);
	}
}

/*
 * lim_copy: make a copy of the plimit structure.
 *
 * We use copy-on-write after fork, and copy when a limit is changed.
 */
struct plimit *
lim_copy(struct plimit *lim)
{
	struct plimit *newlim;
	char *corename;
	size_t alen, len;

	newlim = kmem_alloc(sizeof(*newlim), KM_SLEEP);
	mutex_init(&newlim->pl_lock, MUTEX_DEFAULT, IPL_NONE);
	newlim->pl_writeable = false;
	newlim->pl_refcnt = 1;
	newlim->pl_sv_limit = NULL;

	mutex_enter(&lim->pl_lock);
	memcpy(newlim->pl_rlimit, lim->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);

	/*
	 * Note: the common case is use of the default core name.
	 */
	alen = 0;
	corename = NULL;
	for (;;) {
		if (lim->pl_corename == defcorename) {
			newlim->pl_corename = defcorename;
			newlim->pl_cnlen = 0;
			break;
		}
		len = lim->pl_cnlen;
		if (len == alen) {
			newlim->pl_corename = corename;
			newlim->pl_cnlen = len;
			memcpy(corename, lim->pl_corename, len);
			corename = NULL;
			break;
		}
		mutex_exit(&lim->pl_lock);
		if (corename) {
			kmem_free(corename, alen);
		}
		alen = len;
		corename = kmem_alloc(alen, KM_SLEEP);
		mutex_enter(&lim->pl_lock);
	}
	mutex_exit(&lim->pl_lock);

	if (corename) {
		kmem_free(corename, alen);
	}
	return newlim;
}

void
lim_addref(struct plimit *lim)
{
	atomic_inc_uint(&lim->pl_refcnt);
}

/*
 * lim_privatise: give a process its own private plimit structure.
 */
void
lim_privatise(proc_t *p)
{
	struct plimit *lim = p->p_limit, *newlim;

	if (lim->pl_writeable) {
		return;
	}

	newlim = lim_copy(lim);

	mutex_enter(p->p_lock);
	if (p->p_limit->pl_writeable) {
		/* Another thread won the race. */
		mutex_exit(p->p_lock);
		lim_free(newlim);
		return;
	}

	/*
	 * Since p->p_limit can be accessed without the lock held,
	 * the old limit structure must not be deleted yet.
	 */
	newlim->pl_sv_limit = p->p_limit;
	newlim->pl_writeable = true;
	p->p_limit = newlim;
	mutex_exit(p->p_lock);
}

void
lim_setcorename(proc_t *p, char *name, size_t len)
{
	struct plimit *lim;
	char *oname;
	size_t olen;

	lim_privatise(p);
	lim = p->p_limit;

	mutex_enter(&lim->pl_lock);
	oname = lim->pl_corename;
	olen = lim->pl_cnlen;
	lim->pl_corename = name;
	lim->pl_cnlen = len;
	mutex_exit(&lim->pl_lock);

	if (oname != defcorename) {
		kmem_free(oname, olen);
	}
}

void
lim_free(struct plimit *lim)
{
	struct plimit *sv_lim;

	do {
		membar_release();
		if (atomic_dec_uint_nv(&lim->pl_refcnt) > 0) {
			return;
		}
		membar_acquire();
		if (lim->pl_corename != defcorename) {
			kmem_free(lim->pl_corename, lim->pl_cnlen);
		}
		sv_lim = lim->pl_sv_limit;
		mutex_destroy(&lim->pl_lock);
		kmem_free(lim, sizeof(*lim));
	} while ((lim = sv_lim) != NULL);
}

struct pstats *
pstatscopy(struct pstats *ps)
{
	struct pstats *nps;
	size_t len;

	nps = kmem_alloc(sizeof(*nps), KM_SLEEP);

	len = (char *)&nps->pstat_endzero - (char *)&nps->pstat_startzero;
	memset(&nps->pstat_startzero, 0, len);

	len = (char *)&nps->pstat_endcopy - (char *)&nps->pstat_startcopy;
	memcpy(&nps->pstat_startcopy, &ps->pstat_startcopy, len);

	return nps;
}

void
pstatsfree(struct pstats *ps)
{

	kmem_free(ps, sizeof(*ps));
}

/*
 * sysctl_proc_findproc: a routine for sysctl proc subtree helpers that
 * need to pick a valid process by PID.
 *
 * => Hold a reference on the process, on success.
 */
static int
sysctl_proc_findproc(lwp_t *l, pid_t pid, proc_t **p2)
{
	proc_t *p;
	int error;

	if (pid == PROC_CURPROC) {
		p = l->l_proc;
	} else {
		mutex_enter(&proc_lock);
		p = proc_find(pid);
		if (p == NULL) {
			mutex_exit(&proc_lock);
			return SET_ERROR(ESRCH);
		}
	}
	error = rw_tryenter(&p->p_reflock, RW_READER) ? 0 : SET_ERROR(EBUSY);
	if (pid != PROC_CURPROC) {
		mutex_exit(&proc_lock);
	}
	*p2 = p;
	return error;
}

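/*
 * The proc.* sysctl helpers below take the target PID from the sysctl
 * name vector; the special value PROC_CURPROC selects the invoking
 * process itself, as in "sysctl proc.curproc.corename".
 */
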
/*
 * sysctl_proc_paxflags: helper routine to get a process's paxctl flags.
 */
static int
sysctl_proc_paxflags(SYSCTLFN_ARGS)
{
	struct proc *p;
	struct sysctlnode node;
	int paxflags;
	int error;

	/* First, validate the request. */
	if (namelen != 0 || name[-1] != PROC_PID_PAXFLAGS)
		return SET_ERROR(EINVAL);

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-2], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error) {
		rw_exit(&p->p_reflock);
		return error;
	}

	/* Retrieve the limits. */
	node = *rnode;
	paxflags = p->p_pax;
	node.sysctl_data = &paxflags;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* If attempting to write a new value, it's an error. */
	if (error == 0 && newp != NULL)
		error = SET_ERROR(EACCES);

	rw_exit(&p->p_reflock);
	return error;
}

/*
 * sysctl_proc_corename: helper routine to get or set the core file name
 * for a process specified by PID.
 */
static int
sysctl_proc_corename(SYSCTLFN_ARGS)
{
	struct proc *p;
	struct plimit *lim;
	char *cnbuf, *cname;
	struct sysctlnode node;
	size_t len;
	int error;

	/* First, validate the request. */
	if (namelen != 0 || name[-1] != PROC_PID_CORENAME)
		return SET_ERROR(EINVAL);

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-2], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error) {
		rw_exit(&p->p_reflock);
		return error;
	}

	cnbuf = PNBUF_GET();

	if (oldp) {
		/* Get case: copy the core name into the buffer. */
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_CORENAME, p,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CORENAME_GET), NULL, NULL);
		if (error) {
			goto done;
		}
		lim = p->p_limit;
		mutex_enter(&lim->pl_lock);
		strlcpy(cnbuf, lim->pl_corename, MAXPATHLEN);
		mutex_exit(&lim->pl_lock);
	}

	node = *rnode;
	node.sysctl_data = cnbuf;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Return if error, or if caller is only getting the core name. */
	if (error || newp == NULL) {
		goto done;
	}

	/*
	 * Set case.  Check permission and then validate the new core name.
	 * It must be either "core", "/core", or end in ".core".
	 */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CORENAME,
	    p, KAUTH_ARG(KAUTH_REQ_PROCESS_CORENAME_SET), cnbuf, NULL);
	if (error) {
		goto done;
	}
	len = strlen(cnbuf);
	if ((len < 4 || strcmp(cnbuf + len - 4, "core") != 0) ||
	    (len > 4 && cnbuf[len - 5] != '/' && cnbuf[len - 5] != '.')) {
		error = SET_ERROR(EINVAL);
		goto done;
	}

	/*
	 * Allocate, copy and set the new core name for the plimit
	 * structure.
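	 * The ++len below makes room for the terminating NUL, so that
	 * lim_setcorename() receives a NUL-terminated string together
	 * with its exact allocation size.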
	 */
	cname = kmem_alloc(++len, KM_NOSLEEP);
	if (cname == NULL) {
		error = SET_ERROR(ENOMEM);
		goto done;
	}
	memcpy(cname, cnbuf, len);
	lim_setcorename(p, cname, len);
done:
	rw_exit(&p->p_reflock);
	PNBUF_PUT(cnbuf);
	return error;
}

/*
 * sysctl_proc_stop: helper routine for checking/setting the stop flags.
 */
static int
sysctl_proc_stop(SYSCTLFN_ARGS)
{
	struct proc *p;
	int isset, flag, error = 0;
	struct sysctlnode node;

	if (namelen != 0)
		return SET_ERROR(EINVAL);

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-2], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error) {
		goto out;
	}

	/* Determine the flag. */
	switch (rnode->sysctl_num) {
	case PROC_PID_STOPFORK:
		flag = PS_STOPFORK;
		break;
	case PROC_PID_STOPEXEC:
		flag = PS_STOPEXEC;
		break;
	case PROC_PID_STOPEXIT:
		flag = PS_STOPEXIT;
		break;
	default:
		error = SET_ERROR(EINVAL);
		goto out;
	}
	isset = (p->p_flag & flag) ? 1 : 0;
	node = *rnode;
	node.sysctl_data = &isset;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Return if error, or if the caller is only getting the flag. */
	if (error || newp == NULL) {
		goto out;
	}

	/* Check if the caller can set the flag. */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_STOPFLAG,
	    p, KAUTH_ARG(flag), NULL, NULL);
	if (error) {
		goto out;
	}
	mutex_enter(p->p_lock);
	if (isset) {
		p->p_sflag |= flag;
	} else {
		p->p_sflag &= ~flag;
	}
	mutex_exit(p->p_lock);
out:
	rw_exit(&p->p_reflock);
	return error;
}

/*
 * sysctl_proc_plimit: helper routine to get/set rlimits of a process.
 */
static int
sysctl_proc_plimit(SYSCTLFN_ARGS)
{
	struct proc *p;
	u_int limitno;
	int which, error = 0;
	struct rlimit alim;
	struct sysctlnode node;

	if (namelen != 0)
		return SET_ERROR(EINVAL);

	which = name[-1];
	if (which != PROC_PID_LIMIT_TYPE_SOFT &&
	    which != PROC_PID_LIMIT_TYPE_HARD)
		return SET_ERROR(EINVAL);

	limitno = name[-2] - 1;
	if (limitno >= RLIM_NLIMITS)
		return SET_ERROR(EINVAL);

	if (name[-3] != PROC_PID_LIMIT)
		return SET_ERROR(EINVAL);

	/* Find the process.  Hold a reference (p_reflock), if found. */
	error = sysctl_proc_findproc(l, (pid_t)name[-4], &p);
	if (error)
		return error;

	/* XXX-elad */
	error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE, p,
	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
	if (error)
		goto out;

	/* Check if the caller can retrieve the limits. */
	if (newp == NULL) {
		error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_GET), &alim,
		    KAUTH_ARG(which));
		if (error)
			goto out;
	}

	/* Retrieve the limits. */
	node = *rnode;
	memcpy(&alim, &p->p_rlimit[limitno], sizeof(alim));
	if (which == PROC_PID_LIMIT_TYPE_HARD) {
		node.sysctl_data = &alim.rlim_max;
	} else {
		node.sysctl_data = &alim.rlim_cur;
	}
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	/* Return if error, or if we are only retrieving the limits. */
	if (error || newp == NULL) {
		goto out;
	}
	error = dosetrlimit(l, p, limitno, &alim);
out:
	rw_exit(&p->p_reflock);
	return error;
}

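/*
 * The nodes created below appear to userland as, for example,
 *
 *	proc.curproc.rlimit.descriptors.soft
 *	proc.curproc.corename
 *	proc.curproc.stopfork
 *
 * and can be read or (subject to the kauth(9) checks in the helpers
 * above) written with sysctl(8).
 */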
/*
 * Setup sysctl nodes.
 */
static void
sysctl_proc_setup(void)
{

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_ANYNUMBER,
	    CTLTYPE_NODE, "curproc",
	    SYSCTL_DESCR("Per-process settings"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, CTL_EOL);

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "paxflags",
	    SYSCTL_DESCR("Process PAX control flags"),
	    sysctl_proc_paxflags, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_PAXFLAGS, CTL_EOL);

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_STRING, "corename",
	    SYSCTL_DESCR("Core file name"),
	    sysctl_proc_corename, 0, NULL, MAXPATHLEN,
	    CTL_PROC, PROC_CURPROC, PROC_PID_CORENAME, CTL_EOL);
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "rlimit",
	    SYSCTL_DESCR("Process limits"),
	    NULL, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, CTL_EOL);

#define create_proc_plimit(s, n) do {					\
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,			\
	    CTLFLAG_PERMANENT,						\
	    CTLTYPE_NODE, s,						\
	    SYSCTL_DESCR("Process " s " limits"),			\
	    NULL, 0, NULL, 0,						\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    CTL_EOL);							\
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,			\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "soft",					\
	    SYSCTL_DESCR("Process soft " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_SOFT, CTL_EOL);				\
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,			\
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,	\
	    CTLTYPE_QUAD, "hard",					\
	    SYSCTL_DESCR("Process hard " s " limit"),			\
	    sysctl_proc_plimit, 0, NULL, 0,				\
	    CTL_PROC, PROC_CURPROC, PROC_PID_LIMIT, n,			\
	    PROC_PID_LIMIT_TYPE_HARD, CTL_EOL);				\
} while (0/*CONSTCOND*/)

	create_proc_plimit("cputime",		PROC_PID_LIMIT_CPU);
	create_proc_plimit("filesize",		PROC_PID_LIMIT_FSIZE);
	create_proc_plimit("datasize",		PROC_PID_LIMIT_DATA);
	create_proc_plimit("stacksize",		PROC_PID_LIMIT_STACK);
	create_proc_plimit("coredumpsize",	PROC_PID_LIMIT_CORE);
	create_proc_plimit("memoryuse",		PROC_PID_LIMIT_RSS);
	create_proc_plimit("memorylocked",	PROC_PID_LIMIT_MEMLOCK);
	create_proc_plimit("maxproc",		PROC_PID_LIMIT_NPROC);
	create_proc_plimit("descriptors",	PROC_PID_LIMIT_NOFILE);
	create_proc_plimit("sbsize",		PROC_PID_LIMIT_SBSIZE);
	create_proc_plimit("vmemoryuse",	PROC_PID_LIMIT_AS);
	create_proc_plimit("maxlwp",		PROC_PID_LIMIT_NTHR);

#undef create_proc_plimit

	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopfork",
	    SYSCTL_DESCR("Stop process at fork(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPFORK, CTL_EOL);
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexec",
	    SYSCTL_DESCR("Stop process at execve(2)"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXEC, CTL_EOL);
	sysctl_createv(&proc_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE|CTLFLAG_ANYWRITE,
	    CTLTYPE_INT, "stopexit",
	    SYSCTL_DESCR("Stop process before completing exit"),
	    sysctl_proc_stop, 0, NULL, 0,
	    CTL_PROC, PROC_CURPROC, PROC_PID_STOPEXIT, CTL_EOL);
}