/*	$NetBSD: kern_exec.c,v 1.531 2025/07/16 19:14:13 kre Exp $	*/

/*-
 * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.531 2025/07/16 19:14:13 kre Exp $");

#include "opt_exec.h"
#include "opt_execfmt.h"
#include "opt_ktrace.h"
#include "opt_modular.h"
#include "opt_pax.h"
#include "opt_syscall_debug.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/types.h>

#include <sys/acct.h>
#include <sys/atomic.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/futex.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/ktrace.h>
#include <sys/lwpctl.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/proc.h>
#include <sys/prot.h>
#include <sys/ptrace.h>
#include <sys/ras.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/spawn.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallargs.h>
#include <sys/syscallvar.h>
#include <sys/systm.h>
#include <sys/uidinfo.h>
#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#include <uvm/uvm_extern.h>

#include <machine/reg.h>

#include <compat/common/compat_util.h>

#ifndef MD_TOPDOWN_INIT
#ifdef __USE_TOPDOWN_VM
#define	MD_TOPDOWN_INIT(epp)	(epp)->ep_flags |= EXEC_TOPDOWN_VM
#else
#define	MD_TOPDOWN_INIT(epp)
#endif
#endif

struct execve_data;

extern int user_va0_disable;

static size_t calcargs(struct execve_data * restrict, const size_t);
static size_t calcstack(struct execve_data * restrict, const size_t);
static int copyoutargs(struct execve_data * restrict, struct lwp *,
    char * const);
static int copyoutpsstrs(struct execve_data * restrict, struct proc *);
static int copyinargs(struct execve_data * restrict, char * const *,
    char * const *, execve_fetch_element_t, char **);
static int copyinargstrs(struct execve_data * restrict, char * const *,
    execve_fetch_element_t, char **, size_t *, void (*)(const void *, size_t));
static int exec_sigcode_map(struct proc *, const struct emul *);

#if defined(DEBUG) && !defined(DEBUG_EXEC)
#define DEBUG_EXEC
#endif
#ifdef DEBUG_EXEC
#define DPRINTF(a) printf a
#define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \
    __LINE__, (s), (a), (b))
static void dump_vmcmds(const struct exec_package * const, size_t, int);
#define DUMPVMCMDS(p, x, e) do { dump_vmcmds((p), (x), (e)); } while (0)
#else
#define DPRINTF(a)
#define COPYPRINTF(s, a, b)
#define DUMPVMCMDS(p, x, e) do {} while (0)
#endif /* DEBUG_EXEC */

/*
 * DTrace SDT provider definitions
 */
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *");
SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *");
SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int");

/*
 * Exec function switch:
 *
 * Note that each makecmds function is responsible for loading the
 * exec package with the necessary functions for any exec-type-specific
 * handling.
 *
 * Functions for specific exec types should be defined in their own
 * header file.
 */
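/*
 * Illustrative sketch (not part of the switch below): a minimal
 * es_makecmds hook inspects the header that check_exec() has already
 * read into ep_hdr, and either claims the image or returns ENOEXEC so
 * the next execsw[] entry gets a chance.  "struct foo_hdr" and
 * FOO_MAGIC are made-up names.
 *
 *	static int
 *	exec_foo_makecmds(struct lwp *l, struct exec_package *epp)
 *	{
 *		struct foo_hdr *fh = epp->ep_hdr;
 *
 *		if (epp->ep_hdrvalid < sizeof(*fh) ||
 *		    fh->fh_magic != FOO_MAGIC)
 *			return ENOEXEC;
 *		...queue vmcmds, set epp->ep_entry and the ep_*size
 *		...fields, then
 *		return 0;
 *	}
 */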
177 */ 178 static const struct execsw **execsw = NULL; 179 static int nexecs; 180 181 u_int exec_maxhdrsz; /* must not be static - used by netbsd32 */ 182 183 /* list of dynamically loaded execsw entries */ 184 static LIST_HEAD(execlist_head, exec_entry) ex_head = 185 LIST_HEAD_INITIALIZER(ex_head); 186 struct exec_entry { 187 LIST_ENTRY(exec_entry) ex_list; 188 SLIST_ENTRY(exec_entry) ex_slist; 189 const struct execsw *ex_sw; 190 }; 191 192 #ifndef __HAVE_SYSCALL_INTERN 193 void syscall(void); 194 #endif 195 196 /* NetBSD autoloadable syscalls */ 197 #ifdef MODULAR 198 #include <kern/syscalls_autoload.c> 199 #endif 200 201 /* NetBSD emul struct */ 202 struct emul emul_netbsd = { 203 .e_name = "netbsd", 204 #ifdef EMUL_NATIVEROOT 205 .e_path = EMUL_NATIVEROOT, 206 #else 207 .e_path = NULL, 208 #endif 209 #ifndef __HAVE_MINIMAL_EMUL 210 .e_flags = EMUL_HAS_SYS___syscall, 211 .e_errno = NULL, 212 .e_nosys = SYS_syscall, 213 .e_nsysent = SYS_NSYSENT, 214 #endif 215 #ifdef MODULAR 216 .e_sc_autoload = netbsd_syscalls_autoload, 217 #endif 218 .e_sysent = sysent, 219 .e_nomodbits = sysent_nomodbits, 220 #ifdef SYSCALL_DEBUG 221 .e_syscallnames = syscallnames, 222 #else 223 .e_syscallnames = NULL, 224 #endif 225 .e_sendsig = sendsig, 226 .e_trapsignal = trapsignal, 227 .e_sigcode = NULL, 228 .e_esigcode = NULL, 229 .e_sigobject = NULL, 230 .e_setregs = setregs, 231 .e_proc_exec = NULL, 232 .e_proc_fork = NULL, 233 .e_proc_exit = NULL, 234 .e_lwp_fork = NULL, 235 .e_lwp_exit = NULL, 236 #ifdef __HAVE_SYSCALL_INTERN 237 .e_syscall_intern = syscall_intern, 238 #else 239 .e_syscall = syscall, 240 #endif 241 .e_sysctlovly = NULL, 242 .e_vm_default_addr = uvm_default_mapaddr, 243 .e_usertrap = NULL, 244 .e_ucsize = sizeof(ucontext_t), 245 .e_startlwp = startlwp 246 }; 247 248 /* 249 * Exec lock. Used to control access to execsw[] structures. 250 * This must not be static so that netbsd32 can access it, too. 
251 */ 252 krwlock_t exec_lock __cacheline_aligned; 253 254 /* 255 * Data used between a loadvm and execve part of an "exec" operation 256 */ 257 struct execve_data { 258 struct exec_package ed_pack; 259 struct pathbuf *ed_pathbuf; 260 struct vattr ed_attr; 261 struct ps_strings ed_arginfo; 262 char *ed_argp; 263 const char *ed_pathstring; 264 char *ed_resolvedname; 265 size_t ed_ps_strings_sz; 266 int ed_szsigcode; 267 size_t ed_argslen; 268 long ed_argc; 269 long ed_envc; 270 }; 271 272 /* 273 * data passed from parent lwp to child during a posix_spawn() 274 */ 275 struct spawn_exec_data { 276 struct execve_data sed_exec; 277 struct posix_spawn_file_actions 278 *sed_actions; 279 struct posix_spawnattr *sed_attrs; 280 struct proc *sed_parent; 281 kcondvar_t sed_cv_child_ready; 282 kmutex_t sed_mtx_child; 283 int sed_error; 284 bool sed_child_ready; 285 volatile uint32_t sed_refcnt; 286 }; 287 288 static struct vm_map *exec_map; 289 static struct pool exec_pool; 290 291 static void * 292 exec_pool_alloc(struct pool *pp, int flags) 293 { 294 295 return (void *)uvm_km_alloc(exec_map, NCARGS, 0, 296 UVM_KMF_PAGEABLE | UVM_KMF_WAITVA); 297 } 298 299 static void 300 exec_pool_free(struct pool *pp, void *addr) 301 { 302 303 uvm_km_free(exec_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE); 304 } 305 306 static struct pool_allocator exec_palloc = { 307 .pa_alloc = exec_pool_alloc, 308 .pa_free = exec_pool_free, 309 .pa_pagesz = NCARGS 310 }; 311 312 static void 313 exec_path_free(struct execve_data *data) 314 { 315 pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring); 316 pathbuf_destroy(data->ed_pathbuf); 317 if (data->ed_resolvedname) 318 PNBUF_PUT(data->ed_resolvedname); 319 } 320 321 static int 322 exec_resolvename(struct lwp *l, struct exec_package *epp, struct vnode *vp, 323 char **rpath) 324 { 325 int error; 326 char *p; 327 328 KASSERT(rpath != NULL); 329 330 *rpath = PNBUF_GET(); 331 error = vnode_to_path(*rpath, MAXPATHLEN, vp, l, l->l_proc); 332 if (error) { 333 DPRINTF(("%s: can't resolve name for %s, error %d\n", 334 __func__, epp->ep_kname, error)); 335 PNBUF_PUT(*rpath); 336 *rpath = NULL; 337 return error; 338 } 339 epp->ep_resolvedname = *rpath; 340 if ((p = strrchr(*rpath, '/')) != NULL) 341 epp->ep_kname = p + 1; 342 return 0; 343 } 344 345 346 /* 347 * check exec: 348 * given an "executable" described in the exec package's namei info, 349 * see what we can do with it. 350 * 351 * ON ENTRY: 352 * exec package with appropriate namei info 353 * lwp pointer of exec'ing lwp 354 * NO SELF-LOCKED VNODES 355 * 356 * ON EXIT: 357 * error: nothing held, etc. exec header still allocated. 358 * ok: filled exec package, executable's vnode (unlocked). 359 * 360 * EXEC SWITCH ENTRY: 361 * Locked vnode to check, exec package, proc. 362 * 363 * EXEC SWITCH EXIT: 364 * ok: return 0, filled exec package, executable's vnode (unlocked). 365 * error: destructive: 366 * everything deallocated execept exec header. 367 * non-destructive: 368 * error code, executable's vnode (unlocked), 369 * exec header unmodified. 370 */ 371 int 372 /*ARGSUSED*/ 373 check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb, 374 char **rpath) 375 { 376 int error, i; 377 struct vnode *vp; 378 size_t resid; 379 380 if (epp->ep_resolvedname) { 381 struct nameidata nd; 382 383 // grab the absolute pathbuf here before namei() trashes it. 
		pathbuf_copystring(pb, epp->ep_resolvedname, PATH_MAX);
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);

		/* first get the vnode */
		if ((error = namei(&nd)) != 0)
			return error;

		epp->ep_vp = vp = nd.ni_vp;
#ifdef DIAGNOSTIC
		/* paranoia (take this out once namei stuff stabilizes) */
		memset(nd.ni_pnbuf, '~', PATH_MAX);
#endif
	} else {
		struct file *fp;

		if ((error = fd_getvnode(epp->ep_xfd, &fp)) != 0)
			return error;
		epp->ep_vp = vp = fp->f_vnode;
		vref(vp);
		fd_putfile(epp->ep_xfd);
		if ((error = exec_resolvename(l, epp, vp, rpath)) != 0)
			return error;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	/* check access and type */
	if (vp->v_type != VREG) {
		error = SET_ERROR(EACCES);
		goto bad1;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto bad1;

	/* get attributes */
	/* XXX VOP_GETATTR is the only thing that needs LK_EXCLUSIVE here */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = SET_ERROR(EACCES);
		goto bad1;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0)
		goto bad1;

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, IO_NODELOCKED, l->l_cred, &resid, NULL);
	if (error)
		goto bad1;

	/* unlock vp, since we need it unlocked from here on out. */
	VOP_UNLOCK(vp);

#if NVERIEXEC > 0
	error = veriexec_verify(l, vp,
	    epp->ep_resolvedname ? epp->ep_resolvedname : epp->ep_kname,
	    epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT,
	    NULL);
	if (error)
		goto bad2;
#endif /* NVERIEXEC > 0 */

#ifdef PAX_SEGVGUARD
	error = pax_segvguard(l, vp, epp->ep_resolvedname, false);
	if (error)
		goto bad2;
#endif /* PAX_SEGVGUARD */

	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * Set up default address space limits.  Can be overridden
	 * by individual exec packages.
	 */
	epp->ep_vm_minaddr = exec_vm_minaddr(VM_MIN_ADDRESS);
	epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = nexecs == 0 ?
	    SET_ERROR(ENOEXEC) : ENOEXEC;
	for (i = 0; i < nexecs; i++) {
		int newerror;

		epp->ep_esch = execsw[i];
		newerror = (*execsw[i]->es_makecmds)(l, epp);

		if (!newerror) {
			/* Seems ok: check that entry point is not too high */
			if (epp->ep_entry >= epp->ep_vm_maxaddr) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting %p due to "
				    "too high entry address (>= %p)\n",
				    __func__, (void *)epp->ep_entry,
				    (void *)epp->ep_vm_maxaddr);
#endif
				error = SET_ERROR(ENOEXEC);
				break;
			}
			/* Seems ok: check that entry point is not too low */
			if (epp->ep_entry < epp->ep_vm_minaddr) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting %p due to "
				    "too low entry address (< %p)\n",
				    __func__, (void *)epp->ep_entry,
				    (void *)epp->ep_vm_minaddr);
#endif
				error = SET_ERROR(ENOEXEC);
				break;
			}

			/* check limits */
#ifdef DIAGNOSTIC
#define LMSG "%s: rejecting due to %s limit (%ju > %ju)\n"
#endif
#ifdef MAXTSIZ
			if (epp->ep_tsize > MAXTSIZ) {
#ifdef DIAGNOSTIC
				printf(LMSG, __func__, "text",
				    (uintmax_t)epp->ep_tsize,
				    (uintmax_t)MAXTSIZ);
#endif
				error = SET_ERROR(ENOMEM);
				break;
			}
#endif
			vsize_t dlimit =
			    (vsize_t)l->l_proc->p_rlimit[RLIMIT_DATA].rlim_cur;
			if (epp->ep_dsize > dlimit) {
#ifdef DIAGNOSTIC
				printf(LMSG, __func__, "data",
				    (uintmax_t)epp->ep_dsize,
				    (uintmax_t)dlimit);
#endif
				error = SET_ERROR(ENOMEM);
				break;
			}
			return 0;
		}

		/*
		 * Reset all the fields that may have been modified by the
		 * loader.
		 */
		KASSERT(epp->ep_emul_arg == NULL);
		if (epp->ep_emul_root != NULL) {
			vrele(epp->ep_emul_root);
			epp->ep_emul_root = NULL;
		}
		if (epp->ep_interp != NULL) {
			vrele(epp->ep_interp);
			epp->ep_interp = NULL;
		}
		epp->ep_pax_flags = 0;

		/* make sure the first "interesting" error code is saved. */
		if (error == ENOEXEC)
			error = newerror;

		if (epp->ep_flags & EXEC_DESTR)
			/* Error from "#!" code, tidied up by recursive call */
			return error;
	}

	/* not found, error */

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

#if NVERIEXEC > 0 || defined(PAX_SEGVGUARD)
bad2:
#endif
	/*
	 * close and release the vnode, restore the old one, free the
	 * pathname buf, and punt.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(vp, FREAD, l->l_cred);
	vput(vp);
	return error;

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
579 */ 580 vput(vp); /* was still locked */ 581 return error; 582 } 583 584 #ifdef __MACHINE_STACK_GROWS_UP 585 #define STACK_PTHREADSPACE NBPG 586 #else 587 #define STACK_PTHREADSPACE 0 588 #endif 589 590 static int 591 execve_fetch_element(char * const *array, size_t index, char **value) 592 { 593 return copyin(array + index, value, sizeof(*value)); 594 } 595 596 /* 597 * exec system call 598 */ 599 int 600 sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval) 601 { 602 /* { 603 syscallarg(const char *) path; 604 syscallarg(char * const *) argp; 605 syscallarg(char * const *) envp; 606 } */ 607 608 return execve1(l, true, SCARG(uap, path), -1, SCARG(uap, argp), 609 SCARG(uap, envp), execve_fetch_element); 610 } 611 612 int 613 sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap, 614 register_t *retval) 615 { 616 /* { 617 syscallarg(int) fd; 618 syscallarg(char * const *) argp; 619 syscallarg(char * const *) envp; 620 } */ 621 622 return execve1(l, false, NULL, SCARG(uap, fd), SCARG(uap, argp), 623 SCARG(uap, envp), execve_fetch_element); 624 } 625 626 /* 627 * Load modules to try and execute an image that we do not understand. 628 * If no execsw entries are present, we load those likely to be needed 629 * in order to run native images only. Otherwise, we autoload all 630 * possible modules that could let us run the binary. XXX lame 631 */ 632 static void 633 exec_autoload(void) 634 { 635 #ifdef MODULAR 636 static const char * const native[] = { 637 "exec_elf32", 638 "exec_elf64", 639 "exec_script", 640 NULL 641 }; 642 static const char * const compat[] = { 643 "exec_elf32", 644 "exec_elf64", 645 "exec_script", 646 "exec_aout", 647 "exec_coff", 648 "exec_ecoff", 649 "compat_aoutm68k", 650 "compat_netbsd32", 651 #if 0 652 "compat_linux", 653 "compat_linux32", 654 #endif 655 "compat_sunos", 656 "compat_sunos32", 657 "compat_ultrix", 658 NULL 659 }; 660 char const * const *list; 661 int i; 662 663 list = nexecs == 0 ? native : compat; 664 for (i = 0; list[i] != NULL; i++) { 665 if (module_autoload(list[i], MODULE_CLASS_EXEC) != 0) { 666 continue; 667 } 668 yield(); 669 } 670 #endif 671 } 672 673 /* 674 * Copy the user or kernel supplied upath to the allocated pathbuffer pbp 675 * making it absolute in the process, by prepending the current working 676 * directory if it is not. If offs is supplied it will contain the offset 677 * where the original supplied copy of upath starts. 
678 */ 679 int 680 exec_makepathbuf(struct lwp *l, const char *upath, enum uio_seg seg, 681 struct pathbuf **pbp, size_t *offs) 682 { 683 char *path, *bp; 684 size_t len, tlen; 685 int error; 686 struct cwdinfo *cwdi; 687 688 path = PNBUF_GET(); 689 if (seg == UIO_SYSSPACE) { 690 error = copystr(upath, path, MAXPATHLEN, &len); 691 } else { 692 error = copyinstr(upath, path, MAXPATHLEN, &len); 693 } 694 if (error) 695 goto err; 696 697 if (path[0] == '/') { 698 if (offs) 699 *offs = 0; 700 goto out; 701 } 702 703 len++; 704 if (len + 1 >= MAXPATHLEN) { 705 error = SET_ERROR(ENAMETOOLONG); 706 goto err; 707 } 708 bp = path + MAXPATHLEN - len; 709 memmove(bp, path, len); 710 *(--bp) = '/'; 711 712 cwdi = l->l_proc->p_cwdi; 713 rw_enter(&cwdi->cwdi_lock, RW_READER); 714 error = getcwd_common(cwdi->cwdi_cdir, NULL, &bp, path, MAXPATHLEN / 2, 715 GETCWD_CHECK_ACCESS, l); 716 rw_exit(&cwdi->cwdi_lock); 717 718 if (error) 719 goto err; 720 tlen = path + MAXPATHLEN - bp; 721 722 memmove(path, bp, tlen); 723 path[tlen - 1] = '\0'; 724 if (offs) 725 *offs = tlen - len; 726 out: 727 *pbp = pathbuf_assimilate(path); 728 return 0; 729 err: 730 PNBUF_PUT(path); 731 return error; 732 } 733 734 vaddr_t 735 exec_vm_minaddr(vaddr_t va_min) 736 { 737 /* 738 * Increase va_min if we don't want NULL to be mappable by the 739 * process. 740 */ 741 #define VM_MIN_GUARD PAGE_SIZE 742 if (user_va0_disable && (va_min < VM_MIN_GUARD)) 743 return VM_MIN_GUARD; 744 return va_min; 745 } 746 747 static int 748 execve_loadvm(struct lwp *l, bool has_path, const char *path, int fd, 749 char * const *args, char * const *envs, 750 execve_fetch_element_t fetch_element, 751 struct execve_data * restrict data) 752 { 753 struct exec_package * const epp = &data->ed_pack; 754 int error; 755 struct proc *p; 756 char *dp; 757 u_int modgen; 758 759 KASSERT(data != NULL); 760 761 p = l->l_proc; 762 modgen = 0; 763 764 SDT_PROBE(proc, kernel, , exec, path, 0, 0, 0, 0); 765 766 /* 767 * Check if we have exceeded our number of processes limit. 768 * This is so that we handle the case where a root daemon 769 * forked, ran setuid to become the desired user and is trying 770 * to exec. The obvious place to do the reference counting check 771 * is setuid(), but we don't do the reference counting check there 772 * like other OS's do because then all the programs that use setuid() 773 * must be modified to check the return code of setuid() and exit(). 774 * It is dangerous to make setuid() fail, because it fails open and 775 * the program will continue to run as root. If we make it succeed 776 * and return an error code, again we are not enforcing the limit. 777 * The best place to enforce the limit is here, when the process tries 778 * to execute a new image, because eventually the process will need 779 * to call exec in order to do something useful. 780 */ 781 retry: 782 if (p->p_flag & PK_SUGID) { 783 if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT, 784 p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS), 785 &p->p_rlimit[RLIMIT_NPROC], 786 KAUTH_ARG(RLIMIT_NPROC)) != 0 && 787 chgproccnt(kauth_cred_getuid(l->l_cred), 0) > 788 p->p_rlimit[RLIMIT_NPROC].rlim_cur) 789 return SET_ERROR(EAGAIN); 790 } 791 792 /* 793 * Drain existing references and forbid new ones. The process 794 * should be left alone until we're done here. This is necessary 795 * to avoid race conditions - e.g. in ptrace() - that might allow 796 * a local user to illicitly obtain elevated privileges. 
797 */ 798 rw_enter(&p->p_reflock, RW_WRITER); 799 800 if (has_path) { 801 size_t offs; 802 /* 803 * Init the namei data to point the file user's program name. 804 * This is done here rather than in check_exec(), so that it's 805 * possible to override this settings if any of makecmd/probe 806 * functions call check_exec() recursively - for example, 807 * see exec_script_makecmds(). 808 */ 809 if ((error = exec_makepathbuf(l, path, UIO_USERSPACE, 810 &data->ed_pathbuf, &offs)) != 0) 811 goto clrflg; 812 data->ed_pathstring = pathbuf_stringcopy_get(data->ed_pathbuf); 813 epp->ep_kname = data->ed_pathstring + offs; 814 data->ed_resolvedname = PNBUF_GET(); 815 epp->ep_resolvedname = data->ed_resolvedname; 816 epp->ep_xfd = -1; 817 } else { 818 data->ed_pathbuf = pathbuf_assimilate(strcpy(PNBUF_GET(), "/")); 819 data->ed_pathstring = pathbuf_stringcopy_get(data->ed_pathbuf); 820 epp->ep_kname = "*fexecve*"; 821 data->ed_resolvedname = NULL; 822 epp->ep_resolvedname = NULL; 823 epp->ep_xfd = fd; 824 } 825 826 827 /* 828 * initialize the fields of the exec package. 829 */ 830 epp->ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP); 831 epp->ep_hdrlen = exec_maxhdrsz; 832 epp->ep_hdrvalid = 0; 833 epp->ep_emul_arg = NULL; 834 epp->ep_emul_arg_free = NULL; 835 memset(&epp->ep_vmcmds, 0, sizeof(epp->ep_vmcmds)); 836 epp->ep_vap = &data->ed_attr; 837 epp->ep_flags = (p->p_flag & PK_32) ? EXEC_FROM32 : 0; 838 MD_TOPDOWN_INIT(epp); 839 epp->ep_emul_root = NULL; 840 epp->ep_interp = NULL; 841 epp->ep_esch = NULL; 842 epp->ep_pax_flags = 0; 843 memset(epp->ep_machine_arch, 0, sizeof(epp->ep_machine_arch)); 844 845 rw_enter(&exec_lock, RW_READER); 846 847 /* see if we can run it. */ 848 if ((error = check_exec(l, epp, data->ed_pathbuf, 849 &data->ed_resolvedname)) != 0) { 850 if (error != ENOENT && error != EACCES && error != ENOEXEC) { 851 DPRINTF(("%s: check exec failed for %s, error %d\n", 852 __func__, epp->ep_kname, error)); 853 } 854 goto freehdr; 855 } 856 857 /* allocate an argument buffer */ 858 data->ed_argp = pool_get(&exec_pool, PR_WAITOK); 859 KASSERT(data->ed_argp != NULL); 860 dp = data->ed_argp; 861 862 if ((error = copyinargs(data, args, envs, fetch_element, &dp)) != 0) { 863 goto bad; 864 } 865 866 /* 867 * Calculate the new stack size. 868 */ 869 870 #ifdef __MACHINE_STACK_GROWS_UP 871 /* 872 * copyargs() fills argc/argv/envp from the lower address even on 873 * __MACHINE_STACK_GROWS_UP machines. Reserve a few words just below the SP 874 * so that _rtld() use it. 
875 */ 876 #define RTLD_GAP 32 877 #else 878 #define RTLD_GAP 0 879 #endif 880 881 const size_t argenvstrlen = (char *)ALIGN(dp) - data->ed_argp; 882 883 data->ed_argslen = calcargs(data, argenvstrlen); 884 885 const size_t len = calcstack(data, pax_aslr_stack_gap(epp) + RTLD_GAP); 886 887 if (len > epp->ep_ssize) { 888 /* in effect, compare to initial limit */ 889 DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len)); 890 error = SET_ERROR(ENOMEM); 891 goto bad; 892 } 893 /* adjust "active stack depth" for process VSZ */ 894 epp->ep_ssize = len; 895 896 return 0; 897 898 bad: 899 /* free the vmspace-creation commands, and release their references */ 900 kill_vmcmds(&epp->ep_vmcmds); 901 /* kill any opened file descriptor, if necessary */ 902 if (epp->ep_flags & EXEC_HASFD) { 903 epp->ep_flags &= ~EXEC_HASFD; 904 fd_close(epp->ep_fd); 905 } 906 /* close and put the exec'd file */ 907 vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY); 908 VOP_CLOSE(epp->ep_vp, FREAD, l->l_cred); 909 vput(epp->ep_vp); 910 pool_put(&exec_pool, data->ed_argp); 911 912 freehdr: 913 kmem_free(epp->ep_hdr, epp->ep_hdrlen); 914 if (epp->ep_emul_root != NULL) 915 vrele(epp->ep_emul_root); 916 if (epp->ep_interp != NULL) 917 vrele(epp->ep_interp); 918 919 rw_exit(&exec_lock); 920 921 exec_path_free(data); 922 923 clrflg: 924 rw_exit(&p->p_reflock); 925 926 if (modgen != module_gen && error == ENOEXEC) { 927 modgen = module_gen; 928 exec_autoload(); 929 goto retry; 930 } 931 932 SDT_PROBE(proc, kernel, , exec__failure, error, 0, 0, 0, 0); 933 return error; 934 } 935 936 static int 937 execve_dovmcmds(struct lwp *l, struct execve_data * restrict data) 938 { 939 struct exec_package * const epp = &data->ed_pack; 940 struct proc *p = l->l_proc; 941 struct exec_vmcmd *base_vcp; 942 int error = 0; 943 size_t i; 944 945 /* record proc's vnode, for use by procfs and others */ 946 if (p->p_textvp) 947 vrele(p->p_textvp); 948 vref(epp->ep_vp); 949 p->p_textvp = epp->ep_vp; 950 951 /* create the new process's VM space by running the vmcmds */ 952 KASSERTMSG(epp->ep_vmcmds.evs_used != 0, "%s: no vmcmds", __func__); 953 954 #ifdef TRACE_EXEC 955 DUMPVMCMDS(epp, 0, 0); 956 #endif 957 958 base_vcp = NULL; 959 960 for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) { 961 struct exec_vmcmd *vcp; 962 963 vcp = &epp->ep_vmcmds.evs_cmds[i]; 964 if (vcp->ev_flags & VMCMD_RELATIVE) { 965 KASSERTMSG(base_vcp != NULL, 966 "%s: relative vmcmd with no base", __func__); 967 KASSERTMSG((vcp->ev_flags & VMCMD_BASE) == 0, 968 "%s: illegal base & relative vmcmd", __func__); 969 vcp->ev_addr += base_vcp->ev_addr; 970 } 971 error = (*vcp->ev_proc)(l, vcp); 972 if (error) 973 DUMPVMCMDS(epp, i, error); 974 if (vcp->ev_flags & VMCMD_BASE) 975 base_vcp = vcp; 976 } 977 978 /* free the vmspace-creation commands, and release their references */ 979 kill_vmcmds(&epp->ep_vmcmds); 980 981 vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY); 982 VOP_CLOSE(epp->ep_vp, FREAD, l->l_cred); 983 vput(epp->ep_vp); 984 985 /* if an error happened, deallocate and punt */ 986 if (error != 0) { 987 DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error)); 988 } 989 return error; 990 } 991 992 static void 993 execve_free_data(struct execve_data *data) 994 { 995 struct exec_package * const epp = &data->ed_pack; 996 997 /* free the vmspace-creation commands, and release their references */ 998 kill_vmcmds(&epp->ep_vmcmds); 999 /* kill any opened file descriptor, if necessary */ 1000 if (epp->ep_flags & EXEC_HASFD) { 1001 epp->ep_flags &= ~EXEC_HASFD; 1002 
		fd_close(epp->ep_fd);
	}

	/* close and put the exec'd file */
	vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(epp->ep_vp, FREAD, curlwp->l_cred);
	vput(epp->ep_vp);
	pool_put(&exec_pool, data->ed_argp);

	kmem_free(epp->ep_hdr, epp->ep_hdrlen);
	if (epp->ep_emul_root != NULL)
		vrele(epp->ep_emul_root);
	if (epp->ep_interp != NULL)
		vrele(epp->ep_interp);

	exec_path_free(data);
}

static void
pathexec(struct proc *p, const char *resolvedname)
{
	/* set command name & other accounting info */
	const char *cmdname;

	if (resolvedname == NULL) {
		cmdname = "*fexecve*";
		resolvedname = "/";
	} else {
		cmdname = strrchr(resolvedname, '/') + 1;
	}
	KASSERTMSG(resolvedname[0] == '/', "bad resolvedname `%s'",
	    resolvedname);

	strlcpy(p->p_comm, cmdname, sizeof(p->p_comm));

	kmem_strfree(p->p_path);
	p->p_path = kmem_strdupsize(resolvedname, NULL, KM_SLEEP);
}

/* XXX elsewhere */
static int
credexec(struct lwp *l, struct execve_data *data)
{
	struct proc *p = l->l_proc;
	struct vattr *attr = &data->ed_attr;
	int error;

	/*
	 * Deal with set[ug]id.  MNT_NOSUID has already been used to disable
	 * s[ug]id.  It's OK to check for PSL_TRACED here as we have blocked
	 * out additional references on the process for the moment.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 &&

	    (((attr->va_mode & S_ISUID) != 0 &&
	      kauth_cred_geteuid(l->l_cred) != attr->va_uid) ||

	     ((attr->va_mode & S_ISGID) != 0 &&
	      kauth_cred_getegid(l->l_cred) != attr->va_gid))) {
		/*
		 * Mark the process as SUGID before we do
		 * anything that might block.
		 */
		proc_crmod_enter();
		proc_crmod_leave(NULL, NULL, true);
		if (data->ed_argc == 0) {
			DPRINTF((
			    "%s: not executing set[ug]id binary with no args\n",
			    __func__));
			return SET_ERROR(EINVAL);
		}

		/* Make sure file descriptors 0..2 are in use. */
		if ((error = fd_checkstd()) != 0) {
			DPRINTF(("%s: fdcheckstd failed %d\n",
			    __func__, error));
			return error;
		}

		/*
		 * Copy the credential so other references don't see our
		 * changes.
		 */
		l->l_cred = kauth_cred_copy(l->l_cred);
#ifdef KTRACE
		/*
		 * If the persistent trace flag isn't set, turn off tracing.
		 */
		if (p->p_tracep) {
			mutex_enter(&ktrace_lock);
			if (!(p->p_traceflag & KTRFAC_PERSISTENT))
				ktrderef(p);
			mutex_exit(&ktrace_lock);
		}
#endif
		if (attr->va_mode & S_ISUID)
			kauth_cred_seteuid(l->l_cred, attr->va_uid);
		if (attr->va_mode & S_ISGID)
			kauth_cred_setegid(l->l_cred, attr->va_gid);
	} else {
		if (kauth_cred_geteuid(l->l_cred) ==
		    kauth_cred_getuid(l->l_cred) &&
		    kauth_cred_getegid(l->l_cred) ==
		    kauth_cred_getgid(l->l_cred))
			p->p_flag &= ~PK_SUGID;
	}

	/*
	 * Copy the credential so other references don't see our changes.
	 * Test to see if this is necessary first, since in the common case
	 * we won't need a private reference.
	 */
1113 */ 1114 if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) || 1115 kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) { 1116 l->l_cred = kauth_cred_copy(l->l_cred); 1117 kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred)); 1118 kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred)); 1119 } 1120 1121 /* Update the master credentials. */ 1122 if (l->l_cred != p->p_cred) { 1123 kauth_cred_t ocred; 1124 mutex_enter(p->p_lock); 1125 ocred = p->p_cred; 1126 p->p_cred = kauth_cred_hold(l->l_cred); 1127 mutex_exit(p->p_lock); 1128 kauth_cred_free(ocred); 1129 } 1130 1131 return 0; 1132 } 1133 1134 static void 1135 emulexec(struct lwp *l, struct exec_package *epp) 1136 { 1137 struct proc *p = l->l_proc; 1138 1139 /* The emulation root will usually have been found when we looked 1140 * for the elf interpreter (or similar), if not look now. */ 1141 if (epp->ep_esch->es_emul->e_path != NULL && 1142 epp->ep_emul_root == NULL) 1143 emul_find_root(l, epp); 1144 1145 /* Any old emulation root got removed by fdcloseexec */ 1146 rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER); 1147 p->p_cwdi->cwdi_edir = epp->ep_emul_root; 1148 rw_exit(&p->p_cwdi->cwdi_lock); 1149 epp->ep_emul_root = NULL; 1150 if (epp->ep_interp != NULL) 1151 vrele(epp->ep_interp); 1152 1153 /* 1154 * Call emulation specific exec hook. This can setup per-process 1155 * p->p_emuldata or do any other per-process stuff an emulation needs. 1156 * 1157 * If we are executing process of different emulation than the 1158 * original forked process, call e_proc_exit() of the old emulation 1159 * first, then e_proc_exec() of new emulation. If the emulation is 1160 * same, the exec hook code should deallocate any old emulation 1161 * resources held previously by this process. 1162 */ 1163 if (p->p_emul && p->p_emul->e_proc_exit 1164 && p->p_emul != epp->ep_esch->es_emul) 1165 (*p->p_emul->e_proc_exit)(p); 1166 1167 /* 1168 * Call exec hook. Emulation code may NOT store reference to anything 1169 * from &pack. 1170 */ 1171 if (epp->ep_esch->es_emul->e_proc_exec) 1172 (*epp->ep_esch->es_emul->e_proc_exec)(p, epp); 1173 1174 /* update p_emul, the old value is no longer needed */ 1175 p->p_emul = epp->ep_esch->es_emul; 1176 1177 /* ...and the same for p_execsw */ 1178 p->p_execsw = epp->ep_esch; 1179 1180 #ifdef __HAVE_SYSCALL_INTERN 1181 (*p->p_emul->e_syscall_intern)(p); 1182 #endif 1183 ktremul(); 1184 } 1185 1186 static int 1187 execve_runproc(struct lwp *l, struct execve_data * restrict data, 1188 bool no_local_exec_lock, bool is_spawn) 1189 { 1190 struct exec_package * const epp = &data->ed_pack; 1191 int error = 0; 1192 struct proc *p; 1193 struct vmspace *vm; 1194 1195 /* 1196 * In case of a posix_spawn operation, the child doing the exec 1197 * might not hold the reader lock on exec_lock, but the parent 1198 * will do this instead. 1199 */ 1200 KASSERT(no_local_exec_lock || rw_lock_held(&exec_lock)); 1201 KASSERT(!no_local_exec_lock || is_spawn); 1202 KASSERT(data != NULL); 1203 1204 p = l->l_proc; 1205 1206 /* Get rid of other LWPs. */ 1207 if (p->p_nlwps > 1) { 1208 mutex_enter(p->p_lock); 1209 exit_lwps(l); 1210 mutex_exit(p->p_lock); 1211 } 1212 KDASSERT(p->p_nlwps == 1); 1213 1214 /* 1215 * All of the other LWPs got rid of their robust futexes 1216 * when they exited above, but we might still have some 1217 * to dispose of. Do that now. 
1218 */ 1219 if (__predict_false(l->l_robust_head != 0)) { 1220 futex_release_all_lwp(l); 1221 /* 1222 * Since this LWP will live on with a different 1223 * program image, we need to clear the robust 1224 * futex list pointer here. 1225 */ 1226 l->l_robust_head = 0; 1227 } 1228 1229 /* Destroy any lwpctl info. */ 1230 if (p->p_lwpctl != NULL) 1231 lwp_ctl_exit(); 1232 1233 /* Remove POSIX timers */ 1234 ptimers_free(p, TIMERS_POSIX); 1235 1236 /* Set the PaX flags. */ 1237 pax_set_flags(epp, p); 1238 1239 /* 1240 * Do whatever is necessary to prepare the address space 1241 * for remapping. Note that this might replace the current 1242 * vmspace with another! 1243 * 1244 * vfork(): do not touch any user space data in the new child 1245 * until we have awoken the parent below, or it will defeat 1246 * lazy pmap switching (on x86). 1247 */ 1248 uvmspace_exec(l, epp->ep_vm_minaddr, epp->ep_vm_maxaddr, 1249 epp->ep_flags & EXEC_TOPDOWN_VM); 1250 vm = p->p_vmspace; 1251 1252 vm->vm_taddr = (void *)epp->ep_taddr; 1253 vm->vm_tsize = btoc(epp->ep_tsize); 1254 vm->vm_daddr = (void*)epp->ep_daddr; 1255 vm->vm_dsize = btoc(epp->ep_dsize); 1256 vm->vm_ssize = btoc(epp->ep_ssize); 1257 vm->vm_issize = 0; 1258 vm->vm_maxsaddr = (void *)epp->ep_maxsaddr; 1259 vm->vm_minsaddr = (void *)epp->ep_minsaddr; 1260 1261 pax_aslr_init_vm(l, vm, epp); 1262 1263 cwdexec(p); 1264 fd_closeexec(); /* handle close on exec & close on fork */ 1265 1266 if (__predict_false(ktrace_on)) 1267 fd_ktrexecfd(); 1268 1269 execsigs(p); /* reset caught signals */ 1270 1271 mutex_enter(p->p_lock); 1272 l->l_ctxlink = NULL; /* reset ucontext link */ 1273 p->p_acflag &= ~AFORK; 1274 p->p_flag |= PK_EXEC; 1275 mutex_exit(p->p_lock); 1276 1277 error = credexec(l, data); 1278 if (error) 1279 goto exec_abort; 1280 1281 #if defined(__HAVE_RAS) 1282 /* 1283 * Remove all RASs from the address space. 1284 */ 1285 ras_purgeall(); 1286 #endif 1287 1288 /* 1289 * Stop profiling. 1290 */ 1291 if ((p->p_stflag & PST_PROFIL) != 0) { 1292 mutex_spin_enter(&p->p_stmutex); 1293 stopprofclock(p); 1294 mutex_spin_exit(&p->p_stmutex); 1295 } 1296 1297 /* 1298 * It's OK to test PL_PPWAIT unlocked here, as other LWPs have 1299 * exited and exec()/exit() are the only places it will be cleared. 1300 * 1301 * Once the parent has been awoken, curlwp may teleport to a new CPU 1302 * in sched_vforkexec(), and it's then OK to start messing with user 1303 * data. See comment above. 1304 */ 1305 if ((p->p_lflag & PL_PPWAIT) != 0) { 1306 bool samecpu; 1307 lwp_t *lp; 1308 1309 mutex_enter(&proc_lock); 1310 lp = p->p_vforklwp; 1311 p->p_vforklwp = NULL; 1312 l->l_lwpctl = NULL; /* was on loan from blocked parent */ 1313 1314 /* Clear flags after cv_broadcast() (scheduler needs them). */ 1315 p->p_lflag &= ~PL_PPWAIT; 1316 lp->l_vforkwaiting = false; 1317 1318 /* If parent is still on same CPU, teleport curlwp elsewhere. */ 1319 samecpu = (lp->l_cpu == curlwp->l_cpu); 1320 cv_broadcast(&lp->l_waitcv); 1321 mutex_exit(&proc_lock); 1322 1323 /* Give the parent its CPU back - find a new home. */ 1324 KASSERT(!is_spawn); 1325 sched_vforkexec(l, samecpu); 1326 } 1327 1328 /* Now map address space. 
	error = execve_dovmcmds(l, data);
	if (error != 0)
		goto exec_abort;

	pathexec(p, epp->ep_resolvedname);

	char * const newstack = STACK_GROW(vm->vm_minsaddr, epp->ep_ssize);

	error = copyoutargs(data, l, newstack);
	if (error != 0)
		goto exec_abort;

	doexechooks(p);

	/*
	 * Set initial SP at the top of the stack.
	 *
	 * Note that on machines where stack grows up (e.g. hppa), SP points to
	 * the end of arg/env strings.  Userland guesses the address of argc
	 * via ps_strings::ps_argvstr.
	 */

	/* Setup new registers and do misc. setup. */
	(*epp->ep_esch->es_emul->e_setregs)(l, epp, (vaddr_t)newstack);
	if (epp->ep_esch->es_setregs)
		(*epp->ep_esch->es_setregs)(l, epp, (vaddr_t)newstack);

	/* Provide a consistent LWP private setting */
	(void)lwp_setprivate(l, NULL);

	/* Discard all PCU state; need to start fresh */
	pcu_discard_all(l);

	/* map the process's signal trampoline code */
	if ((error = exec_sigcode_map(p, epp->ep_esch->es_emul)) != 0) {
		DPRINTF(("%s: map sigcode failed %d\n", __func__, error));
		goto exec_abort;
	}

	pool_put(&exec_pool, data->ed_argp);

	/*
	 * Notify anyone who might care that we've exec'd.
	 *
	 * This is slightly racy; someone could sneak in and
	 * attach a knote after we've decided not to notify,
	 * or vice-versa, but that's not particularly bothersome.
	 * knote_proc_exec() will acquire p->p_lock as needed.
	 */
	if (!SLIST_EMPTY(&p->p_klist)) {
		knote_proc_exec(p);
	}

	kmem_free(epp->ep_hdr, epp->ep_hdrlen);

	SDT_PROBE(proc, kernel, , exec__success, epp->ep_kname, 0, 0, 0, 0);

	emulexec(l, epp);

	/* Allow new references from the debugger/procfs. */
	rw_exit(&p->p_reflock);
	if (!no_local_exec_lock)
		rw_exit(&exec_lock);

	mutex_enter(&proc_lock);

	/* posix_spawn(3) reports a single event with implied exec(3) */
	if ((p->p_slflag & PSL_TRACED) && !is_spawn) {
		mutex_enter(p->p_lock);
		eventswitch(TRAP_EXEC, 0, 0);
		mutex_enter(&proc_lock);
	}

	if (p->p_sflag & PS_STOPEXEC) {
		ksiginfoq_t kq;

		KASSERT(l->l_blcnt == 0);
		p->p_pptr->p_nstopchild++;
		p->p_waited = 0;
		mutex_enter(p->p_lock);
		ksiginfo_queue_init(&kq);
		sigclearall(p, &contsigmask, &kq);
		lwp_lock(l);
		l->l_stat = LSSTOP;
		p->p_stat = SSTOP;
		p->p_nrlwps--;
		lwp_unlock(l);
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		ksiginfo_queue_drain(&kq);
	} else {
		mutex_exit(&proc_lock);
	}

	exec_path_free(data);
#ifdef TRACE_EXEC
	DPRINTF(("%s finished\n", __func__));
#endif
	return EJUSTRETURN;

exec_abort:
	SDT_PROBE(proc, kernel, , exec__failure, error, 0, 0, 0, 0);
	rw_exit(&p->p_reflock);
	if (!no_local_exec_lock)
		rw_exit(&exec_lock);

	exec_path_free(data);

	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	if (vm != NULL) {
		uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	}

	exec_free_emul_arg(epp);
	pool_put(&exec_pool, data->ed_argp);
	kmem_free(epp->ep_hdr, epp->ep_hdrlen);
	if (epp->ep_emul_root != NULL)
		vrele(epp->ep_emul_root);
	if (epp->ep_interp != NULL)
		vrele(epp->ep_interp);

	/* Acquire the sched-state mutex (exit1() will release it). */
	if (!is_spawn) {
		mutex_enter(p->p_lock);
		exit1(l, error, SIGABRT);
	}

	return error;
}

int
execve1(struct lwp *l, bool has_path, const char *path, int fd,
    char * const *args, char * const *envs,
    execve_fetch_element_t fetch_element)
{
	struct execve_data data;
	int error;

	error = execve_loadvm(l, has_path, path, fd, args, envs, fetch_element,
	    &data);
	if (error)
		return error;
	error = execve_runproc(l, &data, false, false);
	return error;
}

static size_t
fromptrsz(const struct exec_package *epp)
{
	return (epp->ep_flags & EXEC_FROM32) ? sizeof(int) : sizeof(char *);
}

static size_t
ptrsz(const struct exec_package *epp)
{
	return (epp->ep_flags & EXEC_32) ? sizeof(int) : sizeof(char *);
}

static size_t
calcargs(struct execve_data * restrict data, const size_t argenvstrlen)
{
	struct exec_package	* const epp = &data->ed_pack;

	const size_t nargenvptrs =
	    1 +			/* long argc */
	    data->ed_argc +	/* char *argv[] */
	    1 +			/* \0 */
	    data->ed_envc +	/* char *env[] */
	    1;			/* \0 */

	return (nargenvptrs * ptrsz(epp))	/* pointers */
	    + argenvstrlen			/* strings */
	    + epp->ep_esch->es_arglen;		/* auxinfo */
}

static size_t
calcstack(struct execve_data * restrict data, const size_t gaplen)
{
	struct exec_package	* const epp = &data->ed_pack;

	data->ed_szsigcode = epp->ep_esch->es_emul->e_esigcode -
	    epp->ep_esch->es_emul->e_sigcode;

	data->ed_ps_strings_sz = (epp->ep_flags & EXEC_32) ?
	    sizeof(struct ps_strings32) : sizeof(struct ps_strings);

	const size_t sigcode_psstr_sz =
	    data->ed_szsigcode +	/* sigcode */
	    data->ed_ps_strings_sz +	/* ps_strings */
	    STACK_PTHREADSPACE;		/* pthread space */

	const size_t stacklen =
	    data->ed_argslen +
	    gaplen +
	    sigcode_psstr_sz;

	/* make the stack "safely" aligned */
	return STACK_LEN_ALIGN(stacklen, STACK_ALIGNBYTES);
}
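/*
 * Rough sketch of what calcargs()/calcstack() reserve on the new stack
 * (the exact placement is machine-dependent and handled by the STACK_*
 * macros; on a downward-growing stack the components sit, roughly, top
 * to bottom):
 *
 *	STACK_PTHREADSPACE	pthread area (grows-up machines only)
 *	ed_ps_strings_sz	ps_strings, copied out in copyoutpsstrs()
 *	ed_szsigcode		signal trampoline of the emulation
 *	gaplen			PaX ASLR stack gap (plus RTLD_GAP)
 *	ed_argslen		argc, argv[], envp[], auxinfo, strings
 */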
1556 */ 1557 1558 char *newargs = STACK_ALLOC( 1559 STACK_SHRINK(newstack, data->ed_argslen), data->ed_argslen); 1560 1561 error = (*epp->ep_esch->es_copyargs)(l, epp, 1562 &data->ed_arginfo, &newargs, data->ed_argp); 1563 1564 if (error) { 1565 DPRINTF(("%s: copyargs failed %d\n", __func__, error)); 1566 return error; 1567 } 1568 1569 error = copyoutpsstrs(data, p); 1570 if (error != 0) 1571 return error; 1572 1573 return 0; 1574 } 1575 1576 static int 1577 copyoutpsstrs(struct execve_data * restrict data, struct proc *p) 1578 { 1579 struct exec_package * const epp = &data->ed_pack; 1580 struct ps_strings32 arginfo32; 1581 void *aip; 1582 int error; 1583 1584 /* fill process ps_strings info */ 1585 p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr, 1586 STACK_PTHREADSPACE), data->ed_ps_strings_sz); 1587 1588 if (epp->ep_flags & EXEC_32) { 1589 aip = &arginfo32; 1590 arginfo32.ps_argvstr = (vaddr_t)data->ed_arginfo.ps_argvstr; 1591 arginfo32.ps_nargvstr = data->ed_arginfo.ps_nargvstr; 1592 arginfo32.ps_envstr = (vaddr_t)data->ed_arginfo.ps_envstr; 1593 arginfo32.ps_nenvstr = data->ed_arginfo.ps_nenvstr; 1594 } else 1595 aip = &data->ed_arginfo; 1596 1597 /* copy out the process's ps_strings structure */ 1598 if ((error = copyout(aip, (void *)p->p_psstrp, data->ed_ps_strings_sz)) 1599 != 0) { 1600 DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n", 1601 __func__, aip, (void *)p->p_psstrp, data->ed_ps_strings_sz)); 1602 return error; 1603 } 1604 1605 return 0; 1606 } 1607 1608 static int 1609 copyinargs(struct execve_data * restrict data, char * const *args, 1610 char * const *envs, execve_fetch_element_t fetch_element, char **dpp) 1611 { 1612 struct exec_package * const epp = &data->ed_pack; 1613 char *dp; 1614 size_t i; 1615 int error; 1616 1617 dp = *dpp; 1618 1619 data->ed_argc = 0; 1620 1621 /* copy the fake args list, if there's one, freeing it as we go */ 1622 if (epp->ep_flags & EXEC_HASARGL) { 1623 struct exec_fakearg *fa = epp->ep_fa; 1624 1625 while (fa->fa_arg != NULL) { 1626 const size_t maxlen = ARG_MAX - (dp - data->ed_argp); 1627 size_t len; 1628 1629 len = strlcpy(dp, fa->fa_arg, maxlen); 1630 /* Count NUL into len. */ 1631 if (len < maxlen) 1632 len++; 1633 else { 1634 while (fa->fa_arg != NULL) { 1635 kmem_free(fa->fa_arg, fa->fa_len); 1636 fa++; 1637 } 1638 kmem_free(epp->ep_fa, epp->ep_fa_len); 1639 epp->ep_flags &= ~EXEC_HASARGL; 1640 return SET_ERROR(E2BIG); 1641 } 1642 ktrexecarg(fa->fa_arg, len - 1); 1643 dp += len; 1644 1645 kmem_free(fa->fa_arg, fa->fa_len); 1646 fa++; 1647 data->ed_argc++; 1648 } 1649 kmem_free(epp->ep_fa, epp->ep_fa_len); 1650 epp->ep_flags &= ~EXEC_HASARGL; 1651 } 1652 1653 /* 1654 * Read and count argument strings from user. 1655 */ 1656 1657 if (args == NULL) { 1658 DPRINTF(("%s: null args\n", __func__)); 1659 return SET_ERROR(EINVAL); 1660 } 1661 if (epp->ep_flags & EXEC_SKIPARG) 1662 args = (const void *)((const char *)args + fromptrsz(epp)); 1663 i = 0; 1664 error = copyinargstrs(data, args, fetch_element, &dp, &i, ktr_execarg); 1665 if (error != 0) { 1666 DPRINTF(("%s: copyin arg %d\n", __func__, error)); 1667 return error; 1668 } 1669 data->ed_argc += i; 1670 1671 /* 1672 * Read and count environment strings from user. 
1673 */ 1674 1675 data->ed_envc = 0; 1676 /* environment need not be there */ 1677 if (envs == NULL) 1678 goto done; 1679 i = 0; 1680 error = copyinargstrs(data, envs, fetch_element, &dp, &i, ktr_execenv); 1681 if (error != 0) { 1682 DPRINTF(("%s: copyin env %d\n", __func__, error)); 1683 return error; 1684 } 1685 data->ed_envc += i; 1686 1687 done: 1688 *dpp = dp; 1689 1690 return 0; 1691 } 1692 1693 static int 1694 copyinargstrs(struct execve_data * restrict data, char * const *strs, 1695 execve_fetch_element_t fetch_element, char **dpp, size_t *ip, 1696 void (*ktr)(const void *, size_t)) 1697 { 1698 char *dp, *sp; 1699 size_t i; 1700 int error; 1701 1702 dp = *dpp; 1703 1704 i = 0; 1705 while (1) { 1706 const size_t maxlen = ARG_MAX - (dp - data->ed_argp); 1707 size_t len; 1708 1709 if ((error = (*fetch_element)(strs, i, &sp)) != 0) { 1710 return error; 1711 } 1712 if (!sp) 1713 break; 1714 if ((error = copyinstr(sp, dp, maxlen, &len)) != 0) { 1715 if (error == ENAMETOOLONG) 1716 error = SET_ERROR(E2BIG); 1717 return error; 1718 } 1719 if (__predict_false(ktrace_on)) 1720 (*ktr)(dp, len - 1); 1721 dp += len; 1722 i++; 1723 } 1724 1725 *dpp = dp; 1726 *ip = i; 1727 1728 return 0; 1729 } 1730 1731 /* 1732 * Copy argv and env strings from kernel buffer (argp) to the new stack. 1733 * Those strings are located just after auxinfo. 1734 */ 1735 int 1736 copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo, 1737 char **stackp, void *argp) 1738 { 1739 char **cpp, *dp, *sp; 1740 size_t len; 1741 void *nullp; 1742 long argc, envc; 1743 int error; 1744 1745 cpp = (char **)*stackp; 1746 nullp = NULL; 1747 argc = arginfo->ps_nargvstr; 1748 envc = arginfo->ps_nenvstr; 1749 1750 /* argc on stack is long */ 1751 CTASSERT(sizeof(*cpp) == sizeof(argc)); 1752 1753 dp = (char *)(cpp + 1754 1 + /* long argc */ 1755 argc + /* char *argv[] */ 1756 1 + /* \0 */ 1757 envc + /* char *env[] */ 1758 1) + /* \0 */ 1759 pack->ep_esch->es_arglen; /* auxinfo */ 1760 sp = argp; 1761 1762 if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) { 1763 COPYPRINTF("", cpp - 1, sizeof(argc)); 1764 return error; 1765 } 1766 1767 /* XXX don't copy them out, remap them! */ 1768 arginfo->ps_argvstr = cpp; /* remember location of argv for later */ 1769 1770 for (; --argc >= 0; sp += len, dp += len) { 1771 if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { 1772 COPYPRINTF("", cpp - 1, sizeof(dp)); 1773 return error; 1774 } 1775 if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { 1776 COPYPRINTF("str", dp, (size_t)ARG_MAX); 1777 return error; 1778 } 1779 } 1780 1781 if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { 1782 COPYPRINTF("", cpp - 1, sizeof(nullp)); 1783 return error; 1784 } 1785 1786 arginfo->ps_envstr = cpp; /* remember location of envp for later */ 1787 1788 for (; --envc >= 0; sp += len, dp += len) { 1789 if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) { 1790 COPYPRINTF("", cpp - 1, sizeof(dp)); 1791 return error; 1792 } 1793 if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) { 1794 COPYPRINTF("str", dp, (size_t)ARG_MAX); 1795 return error; 1796 } 1797 1798 } 1799 1800 if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) { 1801 COPYPRINTF("", cpp - 1, sizeof(nullp)); 1802 return error; 1803 } 1804 1805 *stackp = (char *)cpp; 1806 return 0; 1807 } 1808 1809 1810 /* 1811 * Add execsw[] entries. 
1812 */ 1813 int 1814 exec_add(struct execsw *esp, int count) 1815 { 1816 struct exec_entry *it; 1817 int i, error = 0; 1818 1819 if (count == 0) { 1820 return 0; 1821 } 1822 1823 /* Check for duplicates. */ 1824 rw_enter(&exec_lock, RW_WRITER); 1825 for (i = 0; i < count; i++) { 1826 LIST_FOREACH(it, &ex_head, ex_list) { 1827 /* assume unique (makecmds, probe_func, emulation) */ 1828 if (it->ex_sw->es_makecmds == esp[i].es_makecmds && 1829 it->ex_sw->u.elf_probe_func == 1830 esp[i].u.elf_probe_func && 1831 it->ex_sw->es_emul == esp[i].es_emul) { 1832 rw_exit(&exec_lock); 1833 return SET_ERROR(EEXIST); 1834 } 1835 } 1836 } 1837 1838 /* Allocate new entries. */ 1839 for (i = 0; i < count; i++) { 1840 it = kmem_alloc(sizeof(*it), KM_SLEEP); 1841 it->ex_sw = &esp[i]; 1842 error = exec_sigcode_alloc(it->ex_sw->es_emul); 1843 if (error != 0) { 1844 kmem_free(it, sizeof(*it)); 1845 break; 1846 } 1847 LIST_INSERT_HEAD(&ex_head, it, ex_list); 1848 } 1849 /* If even one fails, remove them all back. */ 1850 if (error != 0) { 1851 for (i--; i >= 0; i--) { 1852 it = LIST_FIRST(&ex_head); 1853 LIST_REMOVE(it, ex_list); 1854 exec_sigcode_free(it->ex_sw->es_emul); 1855 kmem_free(it, sizeof(*it)); 1856 } 1857 rw_exit(&exec_lock); 1858 return error; 1859 } 1860 1861 /* update execsw[] */ 1862 exec_init(0); 1863 rw_exit(&exec_lock); 1864 return 0; 1865 } 1866 1867 /* 1868 * Remove execsw[] entry. 1869 */ 1870 int 1871 exec_remove(struct execsw *esp, int count) 1872 { 1873 struct exec_entry *it, *next; 1874 int i; 1875 const struct proclist_desc *pd; 1876 proc_t *p; 1877 1878 if (count == 0) { 1879 return 0; 1880 } 1881 1882 /* Abort if any are busy. */ 1883 rw_enter(&exec_lock, RW_WRITER); 1884 for (i = 0; i < count; i++) { 1885 mutex_enter(&proc_lock); 1886 for (pd = proclists; pd->pd_list != NULL; pd++) { 1887 PROCLIST_FOREACH(p, pd->pd_list) { 1888 if (p->p_execsw == &esp[i]) { 1889 mutex_exit(&proc_lock); 1890 rw_exit(&exec_lock); 1891 return SET_ERROR(EBUSY); 1892 } 1893 } 1894 } 1895 mutex_exit(&proc_lock); 1896 } 1897 1898 /* None are busy, so remove them all. */ 1899 for (i = 0; i < count; i++) { 1900 for (it = LIST_FIRST(&ex_head); it != NULL; it = next) { 1901 next = LIST_NEXT(it, ex_list); 1902 if (it->ex_sw == &esp[i]) { 1903 LIST_REMOVE(it, ex_list); 1904 exec_sigcode_free(it->ex_sw->es_emul); 1905 kmem_free(it, sizeof(*it)); 1906 break; 1907 } 1908 } 1909 } 1910 1911 /* update execsw[] */ 1912 exec_init(0); 1913 rw_exit(&exec_lock); 1914 return 0; 1915 } 1916 1917 /* 1918 * Initialize exec structures. If init_boot is true, also does necessary 1919 * one-time initialization (it's called from main() that way). 1920 * Once system is multiuser, this should be called with exec_lock held, 1921 * i.e. via exec_{add|remove}(). 1922 */ 1923 int 1924 exec_init(int init_boot) 1925 { 1926 const struct execsw **sw; 1927 struct exec_entry *ex; 1928 SLIST_HEAD(,exec_entry) first; 1929 SLIST_HEAD(,exec_entry) any; 1930 SLIST_HEAD(,exec_entry) last; 1931 int i, sz; 1932 1933 if (init_boot) { 1934 /* do one-time initializations */ 1935 vaddr_t vmin = 0, vmax; 1936 1937 rw_init(&exec_lock); 1938 exec_map = uvm_km_suballoc(kernel_map, &vmin, &vmax, 1939 maxexec*NCARGS, VM_MAP_PAGEABLE, false, NULL); 1940 pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH, 1941 "execargs", &exec_palloc, IPL_NONE); 1942 pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0); 1943 } else { 1944 KASSERT(rw_write_held(&exec_lock)); 1945 } 1946 1947 /* Sort each entry onto the appropriate queue. 
/*
 * Initialize exec structures.  If init_boot is true, also does necessary
 * one-time initialization (it's called from main() that way).
 * Once system is multiuser, this should be called with exec_lock held,
 * i.e. via exec_{add|remove}().
 */
int
exec_init(int init_boot)
{
	const struct execsw	**sw;
	struct exec_entry	*ex;
	SLIST_HEAD(,exec_entry)	first;
	SLIST_HEAD(,exec_entry)	any;
	SLIST_HEAD(,exec_entry)	last;
	int			i, sz;

	if (init_boot) {
		/* do one-time initializations */
		vaddr_t vmin = 0, vmax;

		rw_init(&exec_lock);
		exec_map = uvm_km_suballoc(kernel_map, &vmin, &vmax,
		    maxexec*NCARGS, VM_MAP_PAGEABLE, false, NULL);
		pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH,
		    "execargs", &exec_palloc, IPL_NONE);
		pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0);
	} else {
		KASSERT(rw_write_held(&exec_lock));
	}

	/* Sort each entry onto the appropriate queue. */
	SLIST_INIT(&first);
	SLIST_INIT(&any);
	SLIST_INIT(&last);
	sz = 0;
	LIST_FOREACH(ex, &ex_head, ex_list) {
		switch(ex->ex_sw->es_prio) {
		case EXECSW_PRIO_FIRST:
			SLIST_INSERT_HEAD(&first, ex, ex_slist);
			break;
		case EXECSW_PRIO_ANY:
			SLIST_INSERT_HEAD(&any, ex, ex_slist);
			break;
		case EXECSW_PRIO_LAST:
			SLIST_INSERT_HEAD(&last, ex, ex_slist);
			break;
		default:
			panic("%s", __func__);
			break;
		}
		sz++;
	}

	/*
	 * Create new execsw[].  Ensure we do not try a zero-sized
	 * allocation.
	 */
	sw = kmem_alloc(sz * sizeof(struct execsw *) + 1, KM_SLEEP);
	i = 0;
	SLIST_FOREACH(ex, &first, ex_slist) {
		sw[i++] = ex->ex_sw;
	}
	SLIST_FOREACH(ex, &any, ex_slist) {
		sw[i++] = ex->ex_sw;
	}
	SLIST_FOREACH(ex, &last, ex_slist) {
		sw[i++] = ex->ex_sw;
	}

	/* Replace old execsw[] and free used memory. */
	if (execsw != NULL) {
		kmem_free(__UNCONST(execsw),
		    nexecs * sizeof(struct execsw *) + 1);
	}
	execsw = sw;
	nexecs = sz;

	/* Figure out the maximum size of an exec header. */
	exec_maxhdrsz = sizeof(int);
	for (i = 0; i < nexecs; i++) {
		if (execsw[i]->es_hdrsz > exec_maxhdrsz)
			exec_maxhdrsz = execsw[i]->es_hdrsz;
	}

	return 0;
}

int
exec_sigcode_alloc(const struct emul *e)
{
	vaddr_t va;
	vsize_t sz;
	int error;
	struct uvm_object *uobj;

	KASSERT(rw_lock_held(&exec_lock));

	if (e == NULL || e->e_sigobject == NULL)
		return 0;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;
	if (sz == 0)
		return 0;

	/*
	 * Create a sigobject for this emulation.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode.  The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * We map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap() would map it.
	 */
	if (*e->e_sigobject == NULL) {
		uobj = uao_create(sz, 0);
		(*uobj->pgops->pgo_reference)(uobj);
		va = vm_map_min(kernel_map);
		if ((error = uvm_map(kernel_map, &va, round_page(sz),
		    uobj, 0, 0,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
		    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
			printf("sigcode kernel mapping failed %d\n", error);
			(*uobj->pgops->pgo_detach)(uobj);
			return error;
		}
		memcpy((void *)va, e->e_sigcode, sz);
#ifdef PMAP_NEED_PROCWR
		pmap_procwr(&proc0, va, sz);
#endif
		uvm_unmap(kernel_map, va, va + round_page(sz));
		*e->e_sigobject = uobj;
		KASSERT(uobj->uo_refs == 1);
	} else {
		/* if already created, reference++ */
		uobj = *e->e_sigobject;
		(*uobj->pgops->pgo_reference)(uobj);
	}

	return 0;
}
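/*
 * exec_sigcode_alloc() and exec_sigcode_free() pair up per execsw
 * entry: exec_add() takes a sigobject reference for each entry it
 * registers and exec_remove() drops it again, so the object survives
 * for as long as any registered emulation may still map it.
 */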
2079 static int
2080 exec_sigcode_map(struct proc *p, const struct emul *e)
2081 {
2082 	vaddr_t va;
2083 	vsize_t sz;
2084 	int error;
2085 	struct uvm_object *uobj;
2086 
2087 	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;
2088 	if (e->e_sigobject == NULL || sz == 0)
2089 		return 0;
2090 
2091 	uobj = *e->e_sigobject;
2092 	if (uobj == NULL)
2093 		return 0;
2094 
2095 	/* Just a hint to uvm_map where to put it. */
2096 	va = e->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr,
2097 	    round_page(sz), p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
2098 
2099 #ifdef __alpha__
2100 	/*
2101 	 * Tru64 puts /sbin/loader at the end of user virtual memory,
2102 	 * which causes the above calculation to put the sigcode at
2103 	 * an invalid address. Put it just below the text instead.
2104 	 */
2105 	if (va == (vaddr_t)vm_map_max(&p->p_vmspace->vm_map)) {
2106 		va = (vaddr_t)p->p_vmspace->vm_taddr - round_page(sz);
2107 	}
2108 #endif
2109 
2110 	(*uobj->pgops->pgo_reference)(uobj);
2111 	error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz),
2112 	    uobj, 0, 0,
2113 	    UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
2114 	    UVM_ADV_RANDOM, 0));
2115 	if (error) {
2116 		DPRINTF(("%s, %d: map %p "
2117 		    "uvm_map %#"PRIxVSIZE"@%#"PRIxVADDR" failed %d\n",
2118 		    __func__, __LINE__, &p->p_vmspace->vm_map, round_page(sz),
2119 		    va, error));
2120 		(*uobj->pgops->pgo_detach)(uobj);
2121 		return error;
2122 	}
2123 	p->p_sigctx.ps_sigcode = (void *)va;
2124 	return 0;
2125 }
2126 
2127 /*
2128  * Release a reference to the spawn_exec_data and free it if this
2129  * was the last one.
2130  */
2131 static void
2132 spawn_exec_data_release(struct spawn_exec_data *data)
2133 {
2134 
2135 	membar_release();
2136 	if (atomic_dec_32_nv(&data->sed_refcnt) != 0)
2137 		return;
2138 	membar_acquire();
2139 
2140 	cv_destroy(&data->sed_cv_child_ready);
2141 	mutex_destroy(&data->sed_mtx_child);
2142 
2143 	if (data->sed_actions)
2144 		posix_spawn_fa_free(data->sed_actions,
2145 		    data->sed_actions->len);
2146 	if (data->sed_attrs)
2147 		kmem_free(data->sed_attrs,
2148 		    sizeof(*data->sed_attrs));
2149 	kmem_free(data, sizeof(*data));
2150 }
2151 
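/*
 * Illustrative sketch (editorial addition): the FAE_* actions interpreted
 * by handle_posix_spawn_file_actions() below are produced by the standard
 * userland posix_spawn(3) file-actions API, e.g. (hypothetical descriptors
 * and path):
 */
#if 0
	posix_spawn_file_actions_init(&fa);
	posix_spawn_file_actions_addopen(&fa, 1, "/tmp/out", O_WRONLY, 0600);
							/* -> FAE_OPEN */
	posix_spawn_file_actions_adddup2(&fa, 1, 2);	/* -> FAE_DUP2 */
	posix_spawn_file_actions_addclose(&fa, 0);	/* -> FAE_CLOSE */
#endif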
2152 static int
2153 handle_posix_spawn_file_actions(struct posix_spawn_file_actions *actions)
2154 {
2155 	struct lwp *l = curlwp;
2156 	register_t retval;
2157 	int error = 0, newfd;
2158 
2159 	if (actions == NULL)
2160 		return 0;
2161 
2162 	for (size_t i = 0; i < actions->len; i++) {
2163 		const struct posix_spawn_file_actions_entry *fae =
2164 		    &actions->fae[i];
2165 		switch (fae->fae_action) {
2166 		case FAE_OPEN:
2167 			if (fd_getfile(fae->fae_fildes) != NULL) {
2168 				error = fd_close(fae->fae_fildes);
2169 				if (error)
2170 					return error;
2171 			}
2172 			error = fd_open(fae->fae_path, fae->fae_oflag,
2173 			    fae->fae_mode, &newfd);
2174 			if (error)
2175 				return error;
2176 			if (newfd != fae->fae_fildes) {
2177 				error = dodup(l, newfd,
2178 				    fae->fae_fildes, 0, &retval);
2179 				if (fd_getfile(newfd) != NULL)
2180 					fd_close(newfd);
2181 			}
2182 			break;
2183 		case FAE_DUP2:
2184 			error = dodup(l, fae->fae_fildes,
2185 			    fae->fae_newfildes, 0, &retval);
2186 			break;
2187 		case FAE_CLOSE:
2188 			/*
2189 			 * POSIX specifies that failures from close() due to
2190 			 * already-closed file descriptors are to be ignored;
2191 			 * out-of-range file descriptors have already been
2192 			 * caught earlier.
2193 			 */
2194 			if (fd_getfile(fae->fae_fildes) != NULL)
2195 				fd_close(fae->fae_fildes);
2196 			break;
2197 		case FAE_CHDIR:
2198 			error = do_sys_chdir(l, fae->fae_chdir_path,
2199 			    UIO_SYSSPACE, &retval);
2200 			break;
2201 		case FAE_FCHDIR:
2202 			error = do_sys_fchdir(l, fae->fae_fildes, &retval);
2203 			break;
2204 		}
2205 		if (error)
2206 			return error;
2207 	}
2208 	return 0;
2209 }
2210 
2211 static int
2212 handle_posix_spawn_attrs(struct posix_spawnattr *attrs, struct proc *parent)
2213 {
2214 	struct sigaction sigact;
2215 	int error = 0;
2216 	struct proc *p = curproc;
2217 	struct lwp *l = curlwp;
2218 
2219 	if (attrs == NULL)
2220 		return 0;
2221 
2222 	memset(&sigact, 0, sizeof(sigact));
2223 	sigact._sa_u._sa_handler = SIG_DFL;
2224 	sigact.sa_flags = 0;
2225 
2226 	/*
2227 	 * Set state to SSTOP so that this proc can be found by pid;
2228 	 * see proc_enterpgrp() and do_sched_setparam() below.
2229 	 */
2230 	mutex_enter(&proc_lock);
2231 	/*
2232 	 * p_stat should be SACTIVE, so we need to adjust the
2233 	 * parent's p_nstopchild here. For safety, just make
2234 	 * sure we're on the good side of SDEAD before we adjust.
2235 	 */
2236 	int ostat = p->p_stat;
2237 	KASSERT(ostat < SSTOP);
2238 	p->p_stat = SSTOP;
2239 	p->p_waited = 0;
2240 	p->p_pptr->p_nstopchild++;
2241 	mutex_exit(&proc_lock);
2242 
2243 	/* Set process group */
2244 	if (attrs->sa_flags & POSIX_SPAWN_SETPGROUP) {
2245 		pid_t mypid = p->p_pid;
2246 		pid_t pgrp = attrs->sa_pgroup;
2247 
2248 		if (pgrp == 0)
2249 			pgrp = mypid;
2250 
2251 		error = proc_enterpgrp(parent, mypid, pgrp, false);
2252 		if (error)
2253 			goto out;
2254 	}
2255 
2256 	/* Set scheduler policy */
2257 	if (attrs->sa_flags & POSIX_SPAWN_SETSCHEDULER)
2258 		error = do_sched_setparam(p->p_pid, 0, attrs->sa_schedpolicy,
2259 		    &attrs->sa_schedparam);
2260 	else if (attrs->sa_flags & POSIX_SPAWN_SETSCHEDPARAM) {
2261 		error = do_sched_setparam(parent->p_pid, 0,
2262 		    SCHED_NONE, &attrs->sa_schedparam);
2263 	}
2264 	if (error)
2265 		goto out;
2266 
2267 	/* Reset user IDs */
2268 	if (attrs->sa_flags & POSIX_SPAWN_RESETIDS) {
2269 		error = do_setresgid(l, -1, kauth_cred_getgid(l->l_cred), -1,
2270 		    ID_E_EQ_R | ID_E_EQ_S);
2271 		if (error)
2272 			goto out;
2273 		error = do_setresuid(l, -1, kauth_cred_getuid(l->l_cred), -1,
2274 		    ID_E_EQ_R | ID_E_EQ_S);
2275 		if (error)
2276 			goto out;
2277 	}
2278 
2279 	/* Set signal masks/defaults */
2280 	if (attrs->sa_flags & POSIX_SPAWN_SETSIGMASK) {
2281 		mutex_enter(p->p_lock);
2282 		error = sigprocmask1(l, SIG_SETMASK, &attrs->sa_sigmask, NULL);
2283 		mutex_exit(p->p_lock);
2284 		if (error)
2285 			goto out;
2286 	}
2287 
2288 	if (attrs->sa_flags & POSIX_SPAWN_SETSIGDEF) {
2289 		/*
2290 		 * The following sigaction call uses a sigaction
2291 		 * version 0 trampoline, which is in the compatibility
2292 		 * code only. This is not a problem because for SIG_DFL
2293 		 * and SIG_IGN the trampolines are now ignored. If they
2294 		 * were not, this would be a problem because we are
2295 		 * holding the exec_lock, and the compat code needs
2296 		 * to do the same in order to replace the trampoline
2297 		 * code of the process.
2298 		 */
2299 		for (int i = 1; i <= NSIG; i++) {
2300 			if (sigismember(&attrs->sa_sigdefault, i))
2301 				sigaction1(l, i, &sigact, NULL, NULL, 0);
2302 		}
2303 	}
2304 out:
2305 	mutex_enter(&proc_lock);
2306 	p->p_stat = ostat;
2307 	p->p_pptr->p_nstopchild--;
2308 	mutex_exit(&proc_lock);
2309 	return error;
2310 }
2311 
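/*
 * Illustrative sketch (editorial addition): the sa_flags bits handled
 * above are set through the userland posix_spawnattr(3) API, for example:
 */
#if 0
	posix_spawnattr_init(&attr);
	posix_spawnattr_setpgroup(&attr, 0);	/* 0 = join own process group */
	posix_spawnattr_setsigmask(&attr, &mask);
	posix_spawnattr_setflags(&attr,
	    POSIX_SPAWN_SETPGROUP | POSIX_SPAWN_SETSIGMASK);
#endif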
2312 /*
2313  * A child lwp of a posix_spawn operation starts here and ends up in
2314  * cpu_spawn_return, dealing with all file descriptor and scheduler
2315  * manipulations in between.
2316  * The parent waits for the child, as it is not clear whether the child
2317  * will be able to acquire its own exec_lock. If it can, the parent can
2318  * be released early and continue running in parallel. If not (or if the
2319  * magic debug flag is passed in the scheduler attribute struct), the
2320  * child rides on the parent's exec lock until it is ready to return
2321  * to userland, and only then releases the parent. This method loses
2322  * concurrency, but improves error reporting.
2323  */
2324 static void
2325 spawn_return(void *arg)
2326 {
2327 	struct spawn_exec_data *spawn_data = arg;
2328 	struct lwp *l = curlwp;
2329 	struct proc *p = l->l_proc;
2330 	int error;
2331 	bool have_reflock;
2332 	bool parent_is_waiting = true;
2333 
2334 	/*
2335 	 * Check if we can release the parent early.
2336 	 * We either need to have no sed_attrs, or sed_attrs must not
2337 	 * have POSIX_SPAWN_RETURNERROR or one of the flags that require
2338 	 * safe access to the parent proc (passed in sed_parent).
2339 	 * We then try to get the exec_lock, and only if that works can we
2340 	 * release the parent here already.
2341 	 */
2342 	struct posix_spawnattr *attrs = spawn_data->sed_attrs;
2343 	if ((!attrs || (attrs->sa_flags
2344 	    & (POSIX_SPAWN_RETURNERROR|POSIX_SPAWN_SETPGROUP)) == 0)
2345 	    && rw_tryenter(&exec_lock, RW_READER)) {
2346 		parent_is_waiting = false;
2347 		mutex_enter(&spawn_data->sed_mtx_child);
2348 		KASSERT(!spawn_data->sed_child_ready);
2349 		spawn_data->sed_error = 0;
2350 		spawn_data->sed_child_ready = true;
2351 		cv_signal(&spawn_data->sed_cv_child_ready);
2352 		mutex_exit(&spawn_data->sed_mtx_child);
2353 	}
2354 
2355 	/* don't allow debugger access yet */
2356 	rw_enter(&p->p_reflock, RW_WRITER);
2357 	have_reflock = true;
2358 
2359 	/* handle posix_spawnattr */
2360 	error = handle_posix_spawn_attrs(attrs, spawn_data->sed_parent);
2361 	if (error)
2362 		goto report_error;
2363 
2364 	/* handle posix_spawn_file_actions */
2365 	error = handle_posix_spawn_file_actions(spawn_data->sed_actions);
2366 	if (error)
2367 		goto report_error;
2368 
2369 	/* now do the real exec */
2370 	error = execve_runproc(l, &spawn_data->sed_exec, parent_is_waiting,
2371 	    true);
2372 	have_reflock = false;
2373 	if (error == EJUSTRETURN)
2374 		error = 0;
2375 	else if (error)
2376 		goto report_error;
2377 
2378 	if (parent_is_waiting) {
2379 		mutex_enter(&spawn_data->sed_mtx_child);
2380 		KASSERT(!spawn_data->sed_child_ready);
2381 		spawn_data->sed_error = 0;
2382 		spawn_data->sed_child_ready = true;
2383 		cv_signal(&spawn_data->sed_cv_child_ready);
2384 		mutex_exit(&spawn_data->sed_mtx_child);
2385 	}
2386 
2387 	/* release our refcount on the data */
2388 	spawn_exec_data_release(spawn_data);
2389 
2390 	if ((p->p_slflag & (PSL_TRACED|PSL_TRACEDCHILD)) ==
2391 	    (PSL_TRACED|PSL_TRACEDCHILD)) {
2392 		eventswitchchild(p, TRAP_CHLD, PTRACE_POSIX_SPAWN);
2393 	}
2394 
2395 	/* and finally: leave to userland for the first time */
2396 	cpu_spawn_return(l);
2397 
2398 	/* NOTREACHED */
2399 	return;
2400 
2401 report_error:
2402 	if (have_reflock) {
2403 		/*
2404 		 * We have not passed through execve_runproc(),
2405 		 * which would have released the p_reflock and also
2406 		 * taken ownership of the sed_exec part of spawn_data,
2407 		 * so release/free both here.
2408 		 */
2409 		rw_exit(&p->p_reflock);
2410 		execve_free_data(&spawn_data->sed_exec);
2411 	}
2412 
2413 	if (parent_is_waiting) {
2414 		/* pass error to parent */
2415 		mutex_enter(&spawn_data->sed_mtx_child);
2416 		KASSERT(!spawn_data->sed_child_ready);
2417 		spawn_data->sed_error = error;
2418 		spawn_data->sed_child_ready = true;
2419 		cv_signal(&spawn_data->sed_cv_child_ready);
2420 		mutex_exit(&spawn_data->sed_mtx_child);
2421 	} else {
2422 		rw_exit(&exec_lock);
2423 	}
2424 
2425 	/* release our refcount on the data */
2426 	spawn_exec_data_release(spawn_data);
2427 
2428 	/* done, exit */
2429 	mutex_enter(p->p_lock);
2430 	/*
2431 	 * POSIX explicitly asks for an exit code of 127 if we report
2432 	 * errors from the child process, so, unfortunately, there
2433 	 * is no way to report a more exact error code.
2434 	 * A NetBSD-specific workaround is the POSIX_SPAWN_RETURNERROR
2435 	 * flag bit in the attrp argument to posix_spawn(2); see above.
2436 	 */
2437 	exit1(l, 127, 0);
2438 }
2439 
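/*
 * Illustrative sketch (editorial addition): a NetBSD-specific caller that
 * wants the exact exec error instead of the POSIX exit status 127 can use
 * the POSIX_SPAWN_RETURNERROR flag mentioned above (hypothetical fragment;
 * note it trades away the early release of the parent):
 */
#if 0
	posix_spawnattr_init(&attr);
	posix_spawnattr_setflags(&attr, POSIX_SPAWN_RETURNERROR);
	error = posix_spawn(&pid, path, NULL, &attr, argv, envp);
	/* error now holds the exec failure code reported by the child */
#endif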
2440 static __inline char **
2441 posix_spawn_fae_path(struct posix_spawn_file_actions_entry *fae)
2442 {
2443 	switch (fae->fae_action) {
2444 	case FAE_OPEN:
2445 		return &fae->fae_path;
2446 	case FAE_CHDIR:
2447 		return &fae->fae_chdir_path;
2448 	default:
2449 		return NULL;
2450 	}
2451 }
2452 
2453 void
2454 posix_spawn_fa_free(struct posix_spawn_file_actions *fa, size_t len)
2455 {
2456 
2457 	for (size_t i = 0; i < len; i++) {
2458 		char **pathp = posix_spawn_fae_path(&fa->fae[i]);
2459 		if (pathp)
2460 			kmem_strfree(*pathp);
2461 	}
2462 	if (fa->len > 0)
2463 		kmem_free(fa->fae, sizeof(*fa->fae) * fa->len);
2464 	kmem_free(fa, sizeof(*fa));
2465 }
2466 
2467 static int
2468 posix_spawn_fa_alloc(struct posix_spawn_file_actions **fap,
2469     const struct posix_spawn_file_actions *ufa, rlim_t lim)
2470 {
2471 	struct posix_spawn_file_actions *fa;
2472 	struct posix_spawn_file_actions_entry *fae;
2473 	char *pbuf = NULL;
2474 	int error;
2475 	size_t i = 0;
2476 
2477 	fa = kmem_alloc(sizeof(*fa), KM_SLEEP);
2478 	error = copyin(ufa, fa, sizeof(*fa));
2479 	if (error || fa->len == 0) {
2480 		kmem_free(fa, sizeof(*fa));
2481 		return error;	/* 0 if not an error, and len == 0 */
2482 	}
2483 
2484 	if (fa->len > lim) {
2485 		kmem_free(fa, sizeof(*fa));
2486 		return SET_ERROR(EINVAL);
2487 	}
2488 
2489 	fa->size = fa->len;
2490 	size_t fal = fa->len * sizeof(*fae);
2491 	fae = fa->fae;
2492 	fa->fae = kmem_alloc(fal, KM_SLEEP);
2493 	error = copyin(fae, fa->fae, fal);
2494 	if (error)
2495 		goto out;
2496 
2497 	pbuf = PNBUF_GET();
2498 	for (; i < fa->len; i++) {
2499 		char **pathp = posix_spawn_fae_path(&fa->fae[i]);
2500 		if (pathp == NULL)
2501 			continue;
2502 		error = copyinstr(*pathp, pbuf, MAXPATHLEN, &fal);
2503 		if (error)
2504 			goto out;
2505 		*pathp = kmem_alloc(fal, KM_SLEEP);
2506 		memcpy(*pathp, pbuf, fal);
2507 	}
2508 	PNBUF_PUT(pbuf);
2509 
2510 	*fap = fa;
2511 	return 0;
2512 out:
2513 	if (pbuf)
2514 		PNBUF_PUT(pbuf);
2515 	posix_spawn_fa_free(fa, i);
2516 	return error;
2517 }
2518 
2519 /*
2520  * N.B.: this increments nprocs upon success. Callers must drop nprocs
2521  * themselves if they subsequently fail for some other reason.
2522  */
2523 int
2524 check_posix_spawn(struct lwp *l1)
2525 {
2526 	int error, tnprocs, count;
2527 	uid_t uid;
2528 	struct proc *p1;
2529 
2530 	p1 = l1->l_proc;
2531 	uid = kauth_cred_getuid(l1->l_cred);
2532 	tnprocs = atomic_inc_uint_nv(&nprocs);
2533 
2534 	/*
2535 	 * Although process entries are dynamically created, we still keep
2536 	 * a global limit on the maximum number we will create.
2537 	 */
2538 	if (__predict_false(tnprocs >= maxproc))
2539 		error = -1;
2540 	else
2541 		error = kauth_authorize_process(l1->l_cred,
2542 		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);
2543 
2544 	if (error) {
2545 		atomic_dec_uint(&nprocs);
2546 		return SET_ERROR(EAGAIN);
2547 	}
2548 
2549 	/*
2550 	 * Enforce limits.
2551 	 */
2552 	count = chgproccnt(uid, 1);
2553 	if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
2554 	    p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
2555 	    &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0 &&
2556 	    __predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
2557 		(void)chgproccnt(uid, -1);
2558 		atomic_dec_uint(&nprocs);
2559 		return SET_ERROR(EAGAIN);
2560 	}
2561 
2562 	return 0;
2563 }
2564 
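/*
 * Illustrative sketch (editorial addition) of the caller contract stated
 * above: after a successful check_posix_spawn(), any later failure requires
 * the caller to undo the accounting itself, as sys_posix_spawn() below
 * does ("later_step" is hypothetical):
 */
#if 0
	error = check_posix_spawn(l1);
	if (error == 0 && (error = later_step(l1)) != 0) {
		(void)chgproccnt(kauth_cred_getuid(l1->l_cred), -1);
		atomic_dec_uint(&nprocs);
	}
#endif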
2565 int
2566 do_posix_spawn(struct lwp *l1, pid_t *pid_res, bool *child_ok, const char *path,
2567     struct posix_spawn_file_actions *fa,
2568     struct posix_spawnattr *sa,
2569     char *const *argv, char *const *envp,
2570     execve_fetch_element_t fetch)
2571 {
2572 
2573 	struct proc *p1, *p2;
2574 	struct lwp *l2;
2575 	int error;
2576 	struct spawn_exec_data *spawn_data;
2577 	vaddr_t uaddr = 0;
2578 	pid_t pid;
2579 	bool have_exec_lock = false;
2580 
2581 	p1 = l1->l_proc;
2582 
2583 	/* Allocate and init spawn_data */
2584 	spawn_data = kmem_zalloc(sizeof(*spawn_data), KM_SLEEP);
2585 	spawn_data->sed_refcnt = 1; /* only parent so far */
2586 	cv_init(&spawn_data->sed_cv_child_ready, "pspawn");
2587 	mutex_init(&spawn_data->sed_mtx_child, MUTEX_DEFAULT, IPL_NONE);
2588 	mutex_enter(&spawn_data->sed_mtx_child);
2589 
2590 	/*
2591 	 * Do the first part of the exec now, collect state
2592 	 * in spawn_data.
2593 	 */
2594 	error = execve_loadvm(l1, true, path, -1, argv,
2595 	    envp, fetch, &spawn_data->sed_exec);
2596 	if (error == EJUSTRETURN)
2597 		error = 0;
2598 	else if (error)
2599 		goto error_exit;
2600 
2601 	have_exec_lock = true;
2602 
2603 	/*
2604 	 * Allocate virtual address space for the U-area now, while it
2605 	 * is still easy to abort the fork operation if we're out of
2606 	 * kernel virtual address space.
2607 	 */
2608 	uaddr = uvm_uarea_alloc();
2609 	if (__predict_false(uaddr == 0)) {
2610 		error = SET_ERROR(ENOMEM);
2611 		goto error_exit;
2612 	}
2613 
2614 	/*
2615 	 * Allocate new proc. Borrow proc0 vmspace for it; we will
2616 	 * replace it with its own before returning to userland
2617 	 * in the child.
2618 	 */
2619 	p2 = proc_alloc();
2620 	if (p2 == NULL) {
2621 		/* We were unable to allocate a process ID. */
2622 		error = SET_ERROR(EAGAIN);
2623 		goto error_exit;
2624 	}
2625 
2626 	/*
2627 	 * This is a point of no return; we will have to go through
2628 	 * the child proc to properly clean it up past this point.
2629 	 */
2630 	pid = p2->p_pid;
2631 
2632 	/*
2633 	 * Make a proc table entry for the new process.
2634 	 * Start by zeroing the section of proc that is zero-initialized,
2635 	 * then copy the section that is copied directly from the parent.
2636 	 */
2637 	memset(&p2->p_startzero, 0,
2638 	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
2639 	memcpy(&p2->p_startcopy, &p1->p_startcopy,
2640 	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));
2641 
2642 	/*
2643 	 * Allocate an empty user vmspace for the new process now.
2644 	 * The min/max and topdown parameters given here are just placeholders;
2645 	 * the right values will be assigned in uvmspace_exec().
2646 	 */
2647 	p2->p_vmspace = uvmspace_alloc(exec_vm_minaddr(VM_MIN_ADDRESS),
2648 	    VM_MAXUSER_ADDRESS, true);
2649 
2650 	TAILQ_INIT(&p2->p_sigpend.sp_info);
2651 
2652 	LIST_INIT(&p2->p_lwps);
2653 	LIST_INIT(&p2->p_sigwaiters);
2654 
2655 	/*
2656 	 * Duplicate sub-structures as needed.
2657 	 * Increase reference counts on shared objects.
2658 	 * Inherit flags we want to keep. The flags related to SIGCHLD
2659 	 * handling are important in order to keep a consistent behaviour
2660 	 * for the child after the fork. If we are a 32-bit process, the
2661 	 * child will be too.
2662 	 */
2663 	p2->p_flag =
2664 	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
2665 	p2->p_emul = p1->p_emul;
2666 	p2->p_execsw = p1->p_execsw;
2667 
2668 	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
2669 	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
2670 	rw_init(&p2->p_reflock);
2671 	cv_init(&p2->p_waitcv, "wait");
2672 	cv_init(&p2->p_lwpcv, "lwpwait");
2673 
2674 	p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
2675 
2676 	kauth_proc_fork(p1, p2);
2677 
2678 	p2->p_raslist = NULL;
2679 	p2->p_fd = fd_copy();
2680 
2681 	/* XXX racy */
2682 	p2->p_mqueue_cnt = p1->p_mqueue_cnt;
2683 
2684 	p2->p_cwdi = cwdinit();
2685 
2686 	/*
2687 	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
2688 	 * we just need to increase pl_refcnt.
2689 	 */
2690 	if (!p1->p_limit->pl_writeable) {
2691 		lim_addref(p1->p_limit);
2692 		p2->p_limit = p1->p_limit;
2693 	} else {
2694 		p2->p_limit = lim_copy(p1->p_limit);
2695 	}
2696 
2697 	p2->p_lflag = 0;
2698 	l1->l_vforkwaiting = false;
2699 	p2->p_sflag = 0;
2700 	p2->p_slflag = 0;
2701 	p2->p_pptr = p1;
2702 	p2->p_ppid = p1->p_pid;
2703 	LIST_INIT(&p2->p_children);
2704 
2705 	p2->p_aio = NULL;
2706 
2707 #ifdef KTRACE
2708 	/*
2709 	 * Copy traceflag and tracefile if enabled.
2710 	 * If not inherited, these were zeroed above.
2711 	 */
2712 	if (p1->p_traceflag & KTRFAC_INHERIT) {
2713 		mutex_enter(&ktrace_lock);
2714 		p2->p_traceflag = p1->p_traceflag;
2715 		if ((p2->p_tracep = p1->p_tracep) != NULL)
2716 			ktradref(p2);
2717 		mutex_exit(&ktrace_lock);
2718 	}
2719 #endif
2720 
2721 	/*
2722 	 * Create signal actions for the child process.
2723 	 */
2724 	p2->p_sigacts = sigactsinit(p1, 0);
2725 	mutex_enter(p1->p_lock);
2726 	p2->p_sflag |=
2727 	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
2728 	sched_proc_fork(p1, p2);
2729 	mutex_exit(p1->p_lock);
2730 
2731 	p2->p_stflag = p1->p_stflag;
2732 
2733 	/*
2734 	 * p_stats.
2735 	 * Copy parts of p_stats, and zero out the rest.
2736 	 */
2737 	p2->p_stats = pstatscopy(p1->p_stats);
2738 
2739 	/* copy over machdep flags to the new proc */
2740 	cpu_proc_fork(p1, p2);
2741 
2742 	/*
2743 	 * Prepare remaining parts of spawn data
2744 	 */
2745 	spawn_data->sed_actions = fa;
2746 	spawn_data->sed_attrs = sa;
2747 
2748 	spawn_data->sed_parent = p1;
2749 
2750 	/* create LWP */
2751 	lwp_create(l1, p2, uaddr, 0, NULL, 0, spawn_return, spawn_data,
2752 	    &l2, l1->l_class, &l1->l_sigmask, &l1->l_sigstk);
2753 	l2->l_ctxlink = NULL;	/* reset ucontext link */
2754 
2755 	/*
2756 	 * Copy the credential so other references don't see our changes.
2757 	 * Test to see if this is necessary first, since in the common case
2758 	 * we won't need a private reference.
2759 	 */
2760 	if (kauth_cred_geteuid(l2->l_cred) != kauth_cred_getsvuid(l2->l_cred) ||
2761 	    kauth_cred_getegid(l2->l_cred) != kauth_cred_getsvgid(l2->l_cred)) {
2762 		l2->l_cred = kauth_cred_copy(l2->l_cred);
2763 		kauth_cred_setsvuid(l2->l_cred, kauth_cred_geteuid(l2->l_cred));
2764 		kauth_cred_setsvgid(l2->l_cred, kauth_cred_getegid(l2->l_cred));
2765 	}
2766 
2767 	/* Update the master credentials. */
2768 	if (l2->l_cred != p2->p_cred) {
2769 		kauth_cred_t ocred;
2770 		mutex_enter(p2->p_lock);
2771 		ocred = p2->p_cred;
2772 		p2->p_cred = kauth_cred_hold(l2->l_cred);
2773 		mutex_exit(p2->p_lock);
2774 		kauth_cred_free(ocred);
2775 	}
2776 
2777 	*child_ok = true;
2778 	spawn_data->sed_refcnt = 2; /* child gets it as well */
2779 #if 0
2780 	l2->l_nopreempt = 1; /* start it non-preemptable */
2781 #endif
2782 
2783 	/*
2784 	 * It's now safe for the scheduler and other processes to see the
2785 	 * child process.
2786 	 */
2787 	mutex_enter(&proc_lock);
2788 
2789 	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
2790 		p2->p_lflag |= PL_CONTROLT;
2791 
2792 	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
2793 	p2->p_exitsig = SIGCHLD; /* signal for parent on exit */
2794 
2795 	if ((p1->p_slflag & (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) ==
2796 	    (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) {
2797 		proc_changeparent(p2, p1->p_pptr);
2798 		SET(p2->p_slflag, PSL_TRACEDCHILD);
2799 	}
2800 
2801 	p2->p_oppid = p1->p_pid; /* Remember the original parent id. */
2802 
2803 	LIST_INSERT_AFTER(p1, p2, p_pglist);
2804 	LIST_INSERT_HEAD(&allproc, p2, p_list);
2805 
2806 	p2->p_trace_enabled = trace_is_enabled(p2);
2807 #ifdef __HAVE_SYSCALL_INTERN
2808 	(*p2->p_emul->e_syscall_intern)(p2);
2809 #endif
2810 
2811 	/*
2812 	 * Make the child runnable, set its start time, and add it to the
2813 	 * run queue, unless the parent requested it to start in SSTOP state.
2814 	 */
2815 	mutex_enter(p2->p_lock);
2816 
2817 	getmicrotime(&p2->p_stats->p_start);
2818 
2819 	lwp_lock(l2);
2820 	KASSERT(p2->p_nrlwps == 1);
2821 	KASSERT(l2->l_stat == LSIDL);
2822 	p2->p_nrlwps = 1;
2823 	p2->p_stat = SACTIVE;
2824 	setrunnable(l2);
2825 	/* LWP now unlocked */
2826 
2827 	mutex_exit(p2->p_lock);
2828 	mutex_exit(&proc_lock);
2829 
2830 	while (!spawn_data->sed_child_ready) {
2831 		cv_wait(&spawn_data->sed_cv_child_ready,
2832 		    &spawn_data->sed_mtx_child);
2833 	}
2834 	error = spawn_data->sed_error;
2835 	mutex_exit(&spawn_data->sed_mtx_child);
2836 	spawn_exec_data_release(spawn_data);
2837 
2838 	rw_exit(&p1->p_reflock);
2839 	rw_exit(&exec_lock);
2840 	have_exec_lock = false;
2841 
2842 	*pid_res = pid;
2843 
2844 	if (error)
2845 		return error;
2846 
2847 	if (p1->p_slflag & PSL_TRACED) {
2848 		/* Paranoid check */
2849 		mutex_enter(&proc_lock);
2850 		if ((p1->p_slflag & (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) !=
2851 		    (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) {
2852 			mutex_exit(&proc_lock);
2853 			return 0;
2854 		}
2855 
2856 		mutex_enter(p1->p_lock);
2857 		eventswitch(TRAP_CHLD, PTRACE_POSIX_SPAWN, pid);
2858 	}
2859 	return 0;
2860 
2861 error_exit:
2862 	if (have_exec_lock) {
2863 		execve_free_data(&spawn_data->sed_exec);
2864 		rw_exit(&p1->p_reflock);
2865 		rw_exit(&exec_lock);
2866 	}
2867 	mutex_exit(&spawn_data->sed_mtx_child);
2868 	spawn_exec_data_release(spawn_data);
2869 	if (uaddr != 0)
2870 		uvm_uarea_free(uaddr);
2871 
2872 	return error;
2873 }
2874 
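/*
 * Illustrative sketch (editorial addition): the net effect of
 * do_posix_spawn() as seen from userland (hypothetical fragment):
 */
#if 0
	extern char **environ;
	char *argv[] = { "ls", NULL };
	pid_t pid;
	int error;

	error = posix_spawn(&pid, "/bin/ls", NULL, NULL, argv, environ);
	if (error == 0)
		(void)waitpid(pid, NULL, 0);
#endif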
2875 int
2876 sys_posix_spawn(struct lwp *l1, const struct sys_posix_spawn_args *uap,
2877     register_t *retval)
2878 {
2879 	/* {
2880 		syscallarg(pid_t *) pid;
2881 		syscallarg(const char *) path;
2882 		syscallarg(const struct posix_spawn_file_actions *) file_actions;
2883 		syscallarg(const struct posix_spawnattr *) attrp;
2884 		syscallarg(char *const *) argv;
2885 		syscallarg(char *const *) envp;
2886 	} */
2887 
2888 	int error;
2889 	struct posix_spawn_file_actions *fa = NULL;
2890 	struct posix_spawnattr *sa = NULL;
2891 	pid_t pid;
2892 	bool child_ok = false;
2893 	rlim_t max_fileactions;
2894 	proc_t *p = l1->l_proc;
2895 
2896 	/* check_posix_spawn() increments nprocs for us. */
2897 	error = check_posix_spawn(l1);
2898 	if (error) {
2899 		*retval = error;
2900 		return 0;
2901 	}
2902 
2903 	/* copy in file_actions struct */
2904 	if (SCARG(uap, file_actions) != NULL) {
2905 		max_fileactions = 2 * uimin(p->p_rlimit[RLIMIT_NOFILE].rlim_cur,
2906 		    maxfiles);
2907 		error = posix_spawn_fa_alloc(&fa, SCARG(uap, file_actions),
2908 		    max_fileactions);
2909 		if (error)
2910 			goto error_exit;
2911 	}
2912 
2913 	/* copyin posix_spawnattr struct */
2914 	if (SCARG(uap, attrp) != NULL) {
2915 		sa = kmem_alloc(sizeof(*sa), KM_SLEEP);
2916 		error = copyin(SCARG(uap, attrp), sa, sizeof(*sa));
2917 		if (error)
2918 			goto error_exit;
2919 	}
2920 
2921 	/*
2922 	 * Do the spawn
2923 	 */
2924 	error = do_posix_spawn(l1, &pid, &child_ok, SCARG(uap, path), fa, sa,
2925 	    SCARG(uap, argv), SCARG(uap, envp), execve_fetch_element);
2926 	if (error)
2927 		goto error_exit;
2928 
2929 	if (error == 0 && SCARG(uap, pid) != NULL)
2930 		error = copyout(&pid, SCARG(uap, pid), sizeof(pid));
2931 
2932 	*retval = error;
2933 	return 0;
2934 
2935 error_exit:
2936 	if (!child_ok) {
2937 		(void)chgproccnt(kauth_cred_getuid(l1->l_cred), -1);
2938 		atomic_dec_uint(&nprocs);
2939 
2940 		if (sa)
2941 			kmem_free(sa, sizeof(*sa));
2942 		if (fa)
2943 			posix_spawn_fa_free(fa, fa->len);
2944 	}
2945 
2946 	*retval = error;
2947 	return 0;
2948 }
2949 
2950 void
2951 exec_free_emul_arg(struct exec_package *epp)
2952 {
2953 	if (epp->ep_emul_arg_free != NULL) {
2954 		KASSERT(epp->ep_emul_arg != NULL);
2955 		(*epp->ep_emul_arg_free)(epp->ep_emul_arg);
2956 		epp->ep_emul_arg_free = NULL;
2957 		epp->ep_emul_arg = NULL;
2958 	} else {
2959 		KASSERT(epp->ep_emul_arg == NULL);
2960 	}
2961 }
2962 
2963 #ifdef DEBUG_EXEC
2964 static void
2965 dump_vmcmds(const struct exec_package * const epp, size_t x, int error)
2966 {
2967 	struct exec_vmcmd *vp = &epp->ep_vmcmds.evs_cmds[0];
2968 	size_t j;
2969 
2970 	if (error == 0)
2971 		DPRINTF(("vmcmds %u\n", epp->ep_vmcmds.evs_used));
2972 	else
2973 		DPRINTF(("vmcmds %zu/%u, error %d\n", x,
2974 		    epp->ep_vmcmds.evs_used, error));
2975 
2976 	for (j = 0; j < epp->ep_vmcmds.evs_used; j++) {
2977 		DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#"
2978 		    PRIxVADDR"/%#"PRIxVSIZE" fd@%#"
2979 		    PRIxVSIZE" prot=0%o flags=%d\n", j,
2980 		    vp[j].ev_proc == vmcmd_map_pagedvn ?
2981 		    "pagedvn" :
2982 		    vp[j].ev_proc == vmcmd_map_readvn ?
2983 		    "readvn" :
2984 		    vp[j].ev_proc == vmcmd_map_zero ?
2985 		    "zero" : "*unknown*",
2986 		    vp[j].ev_addr, vp[j].ev_len,
2987 		    vp[j].ev_offset, vp[j].ev_prot,
2988 		    vp[j].ev_flags));
2989 		if (error != 0 && j == x)
2990 			DPRINTF(("        ^--- failed\n"));
2991 	}
2992 }
2993 #endif
2994 
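/*
 * Illustrative sketch (editorial addition, hypothetical names):
 * exec_free_emul_arg() above pairs with an emulation's makecmds hook,
 * which may stash per-exec state in the package like this:
 */
#if 0
static int
example_makecmds(struct lwp *l, struct exec_package *epp)
{
	struct example_args *args;

	args = kmem_alloc(sizeof(*args), KM_SLEEP);
	/* ... fill in args ... */
	epp->ep_emul_arg = args;
	epp->ep_emul_arg_free = example_args_free;
	return 0;
}
#endif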