/*	$NetBSD: kern_exec.c,v 1.526 2025/03/15 12:11:09 riastradh Exp $	*/

/*-
 * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.526 2025/03/15 12:11:09 riastradh Exp $");

#include "opt_exec.h"
#include "opt_execfmt.h"
#include "opt_ktrace.h"
#include "opt_modular.h"
#include "opt_pax.h"
#include "opt_syscall_debug.h"
#include "veriexec.h"

#include <sys/param.h>
#include <sys/types.h>

#include <sys/acct.h>
#include <sys/atomic.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/futex.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/ktrace.h>
#include <sys/lwpctl.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/proc.h>
#include <sys/prot.h>
#include <sys/ptrace.h>
#include <sys/ras.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/spawn.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/syscallargs.h>
#include <sys/syscallvar.h>
#include <sys/systm.h>
#include <sys/uidinfo.h>
#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */
#include <sys/vfs_syscalls.h>
#include <sys/vnode.h>
#include <sys/wait.h>

#include <uvm/uvm_extern.h>

#include <machine/reg.h>

#include <compat/common/compat_util.h>

#ifndef MD_TOPDOWN_INIT
#ifdef __USE_TOPDOWN_VM
#define	MD_TOPDOWN_INIT(epp)	(epp)->ep_flags |= EXEC_TOPDOWN_VM
#else
#define	MD_TOPDOWN_INIT(epp)
#endif
#endif

struct execve_data;

extern int user_va0_disable;

static size_t calcargs(struct execve_data * restrict, const size_t);
static size_t calcstack(struct execve_data * restrict, const size_t);
static int copyoutargs(struct execve_data * restrict, struct lwp *,
    char * const);
static int copyoutpsstrs(struct execve_data * restrict, struct proc *);
static int copyinargs(struct execve_data * restrict, char * const *,
    char * const *, execve_fetch_element_t, char **);
static int copyinargstrs(struct execve_data * restrict, char * const *,
    execve_fetch_element_t, char **, size_t *, void (*)(const void *, size_t));
static int exec_sigcode_map(struct proc *, const struct emul *);

#if defined(DEBUG) && !defined(DEBUG_EXEC)
#define DEBUG_EXEC
#endif
#ifdef DEBUG_EXEC
#define DPRINTF(a) printf a
#define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \
    __LINE__, (s), (a), (b))
static void dump_vmcmds(const struct exec_package * const, size_t, int);
#define DUMPVMCMDS(p, x, e) do { dump_vmcmds((p), (x), (e)); } while (0)
#else
#define DPRINTF(a)
#define COPYPRINTF(s, a, b)
#define DUMPVMCMDS(p, x, e) do {} while (0)
#endif /* DEBUG_EXEC */

/*
 * DTrace SDT provider definitions
 */
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, kernel, , exec, "char *");
SDT_PROBE_DEFINE1(proc, kernel, , exec__success, "char *");
SDT_PROBE_DEFINE1(proc, kernel, , exec__failure, "int");

/*
 * Exec function switch:
 *
 * Note that each makecmds function is responsible for loading the
 * exec package with the necessary functions for any exec-type-specific
 * handling.
 *
 * Functions for specific exec types should be defined in their own
 * header file.
 */
static const struct execsw **execsw = NULL;
static int nexecs;

u_int exec_maxhdrsz;	/* must not be static - used by netbsd32 */

/* list of dynamically loaded execsw entries */
static LIST_HEAD(execlist_head, exec_entry) ex_head =
    LIST_HEAD_INITIALIZER(ex_head);
struct exec_entry {
	LIST_ENTRY(exec_entry)	ex_list;
	SLIST_ENTRY(exec_entry)	ex_slist;
	const struct execsw	*ex_sw;
};

#ifndef __HAVE_SYSCALL_INTERN
void	syscall(void);
#endif

/* NetBSD autoloadable syscalls */
#ifdef MODULAR
#include <kern/syscalls_autoload.c>
#endif

/* NetBSD emul struct */
struct emul emul_netbsd = {
	.e_name = "netbsd",
#ifdef EMUL_NATIVEROOT
	.e_path = EMUL_NATIVEROOT,
#else
	.e_path = NULL,
#endif
#ifndef __HAVE_MINIMAL_EMUL
	.e_flags = EMUL_HAS_SYS___syscall,
	.e_errno = NULL,
	.e_nosys = SYS_syscall,
	.e_nsysent = SYS_NSYSENT,
#endif
#ifdef MODULAR
	.e_sc_autoload = netbsd_syscalls_autoload,
#endif
	.e_sysent = sysent,
	.e_nomodbits = sysent_nomodbits,
#ifdef SYSCALL_DEBUG
	.e_syscallnames = syscallnames,
#else
	.e_syscallnames = NULL,
#endif
	.e_sendsig = sendsig,
	.e_trapsignal = trapsignal,
	.e_sigcode = NULL,
	.e_esigcode = NULL,
	.e_sigobject = NULL,
	.e_setregs = setregs,
	.e_proc_exec = NULL,
	.e_proc_fork = NULL,
	.e_proc_exit = NULL,
	.e_lwp_fork = NULL,
	.e_lwp_exit = NULL,
#ifdef __HAVE_SYSCALL_INTERN
	.e_syscall_intern = syscall_intern,
#else
	.e_syscall = syscall,
#endif
	.e_sysctlovly = NULL,
	.e_vm_default_addr = uvm_default_mapaddr,
	.e_usertrap = NULL,
	.e_ucsize = sizeof(ucontext_t),
	.e_startlwp = startlwp
};

/*
 * Exec lock. Used to control access to execsw[] structures.
 * This must not be static so that netbsd32 can access it, too.
 */
krwlock_t exec_lock __cacheline_aligned;

/*
 * Data shared between the execve_loadvm() and execve_runproc() parts
 * of an "exec" operation
 */
struct execve_data {
	struct exec_package ed_pack;
	struct pathbuf *ed_pathbuf;
	struct vattr ed_attr;
	struct ps_strings ed_arginfo;
	char *ed_argp;
	const char *ed_pathstring;
	char *ed_resolvedname;
	size_t ed_ps_strings_sz;
	int ed_szsigcode;
	size_t ed_argslen;
	long ed_argc;
	long ed_envc;
};

/*
 * data passed from parent lwp to child during a posix_spawn()
 */
struct spawn_exec_data {
	struct execve_data sed_exec;
	struct posix_spawn_file_actions *sed_actions;
	struct posix_spawnattr *sed_attrs;
	struct proc *sed_parent;
	kcondvar_t sed_cv_child_ready;
	kmutex_t sed_mtx_child;
	int sed_error;
	bool sed_child_ready;
	volatile uint32_t sed_refcnt;
};

static struct vm_map *exec_map;
static struct pool exec_pool;

static void *
exec_pool_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(exec_map, NCARGS, 0,
	    UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
}

static void
exec_pool_free(struct pool *pp, void *addr)
{

	uvm_km_free(exec_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE);
}

static struct pool_allocator exec_palloc = {
	.pa_alloc = exec_pool_alloc,
	.pa_free = exec_pool_free,
	.pa_pagesz = NCARGS
};
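
/*
 * Usage sketch (illustrative only, mirroring execve_loadvm() and the
 * error paths below): each argument buffer is a fixed NCARGS-sized
 * chunk of pageable kernel VA, recycled through exec_pool.
 *
 *	char *argp = pool_get(&exec_pool, PR_WAITOK);	// NCARGS bytes
 *	// ... copy argument/environment strings into argp ...
 *	pool_put(&exec_pool, argp);			// on exit or error
 */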

static void
exec_path_free(struct execve_data *data)
{
	pathbuf_stringcopy_put(data->ed_pathbuf, data->ed_pathstring);
	pathbuf_destroy(data->ed_pathbuf);
	if (data->ed_resolvedname)
		PNBUF_PUT(data->ed_resolvedname);
}

static int
exec_resolvename(struct lwp *l, struct exec_package *epp, struct vnode *vp,
    char **rpath)
{
	int error;
	char *p;

	KASSERT(rpath != NULL);

	*rpath = PNBUF_GET();
	error = vnode_to_path(*rpath, MAXPATHLEN, vp, l, l->l_proc);
	if (error) {
		DPRINTF(("%s: can't resolve name for %s, error %d\n",
		    __func__, epp->ep_kname, error));
		PNBUF_PUT(*rpath);
		*rpath = NULL;
		return error;
	}
	epp->ep_resolvedname = *rpath;
	if ((p = strrchr(*rpath, '/')) != NULL)
		epp->ep_kname = p + 1;
	return 0;
}


/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	lwp pointer of exec'ing lwp
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, executable's vnode (unlocked).
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, executable's vnode (unlocked).
 *	error:	destructive:
 *		everything deallocated except exec header.
 *	non-destructive:
 *		error code, executable's vnode (unlocked),
 *		exec header unmodified.
 */
int
/*ARGSUSED*/
check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb,
    char **rpath)
{
	int error, i;
	struct vnode *vp;
	size_t resid;

	if (epp->ep_resolvedname) {
		struct nameidata nd;

		// grab the absolute pathbuf here before namei() trashes it.
		pathbuf_copystring(pb, epp->ep_resolvedname, PATH_MAX);
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);

		/* first get the vnode */
		if ((error = namei(&nd)) != 0)
			return error;

		epp->ep_vp = vp = nd.ni_vp;
#ifdef DIAGNOSTIC
		/* paranoia (take this out once namei stuff stabilizes) */
		memset(nd.ni_pnbuf, '~', PATH_MAX);
#endif
	} else {
		struct file *fp;

		if ((error = fd_getvnode(epp->ep_xfd, &fp)) != 0)
			return error;
		epp->ep_vp = vp = fp->f_vnode;
		vref(vp);
		fd_putfile(epp->ep_xfd);
		if ((error = exec_resolvename(l, epp, vp, rpath)) != 0)
			return error;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	/* check access and type */
	if (vp->v_type != VREG) {
		error = SET_ERROR(EACCES);
		goto bad1;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto bad1;

	/* get attributes */
	/* XXX VOP_GETATTR is the only thing that needs LK_EXCLUSIVE here */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = SET_ERROR(EACCES);
		goto bad1;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0)
		goto bad1;

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, IO_NODELOCKED, l->l_cred, &resid, NULL);
	if (error)
		goto bad1;

	/* unlock vp, since we need it unlocked from here on out. */
	VOP_UNLOCK(vp);

#if NVERIEXEC > 0
	error = veriexec_verify(l, vp,
	    epp->ep_resolvedname ? epp->ep_resolvedname : epp->ep_kname,
	    epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT,
	    NULL);
	if (error)
		goto bad2;
#endif /* NVERIEXEC > 0 */

#ifdef PAX_SEGVGUARD
	error = pax_segvguard(l, vp, epp->ep_resolvedname, false);
	if (error)
		goto bad2;
#endif /* PAX_SEGVGUARD */

	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * Set up default address space limits.  Can be overridden
	 * by individual exec packages.
	 */
	epp->ep_vm_minaddr = exec_vm_minaddr(VM_MIN_ADDRESS);
	epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = nexecs == 0 ? SET_ERROR(ENOEXEC) : ENOEXEC;
	for (i = 0; i < nexecs; i++) {
		int newerror;

		epp->ep_esch = execsw[i];
		newerror = (*execsw[i]->es_makecmds)(l, epp);

		if (!newerror) {
			/* Seems ok: check that entry point is not too high */
			if (epp->ep_entry >= epp->ep_vm_maxaddr) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting %p due to "
				    "too high entry address (>= %p)\n",
				    __func__, (void *)epp->ep_entry,
				    (void *)epp->ep_vm_maxaddr);
#endif
				error = SET_ERROR(ENOEXEC);
				break;
			}
			/* Seems ok: check that entry point is not too low */
			if (epp->ep_entry < epp->ep_vm_minaddr) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting %p due to "
				    "too low entry address (< %p)\n",
				    __func__, (void *)epp->ep_entry,
				    (void *)epp->ep_vm_minaddr);
#endif
				error = SET_ERROR(ENOEXEC);
				break;
			}

			/* check limits */
#ifdef DIAGNOSTIC
#define LMSG "%s: rejecting due to %s limit (%ju > %ju)\n"
#endif
#ifdef MAXTSIZ
			if (epp->ep_tsize > MAXTSIZ) {
#ifdef DIAGNOSTIC
				printf(LMSG, __func__, "text",
				    (uintmax_t)epp->ep_tsize,
				    (uintmax_t)MAXTSIZ);
#endif
				error = SET_ERROR(ENOMEM);
				break;
			}
#endif
			vsize_t dlimit =
			    (vsize_t)l->l_proc->p_rlimit[RLIMIT_DATA].rlim_cur;
			if (epp->ep_dsize > dlimit) {
#ifdef DIAGNOSTIC
				printf(LMSG, __func__, "data",
				    (uintmax_t)epp->ep_dsize,
				    (uintmax_t)dlimit);
#endif
				error = SET_ERROR(ENOMEM);
				break;
			}
			return 0;
		}

		/*
		 * Reset all the fields that may have been modified by the
		 * loader.
		 */
		KASSERT(epp->ep_emul_arg == NULL);
		if (epp->ep_emul_root != NULL) {
			vrele(epp->ep_emul_root);
			epp->ep_emul_root = NULL;
		}
		if (epp->ep_interp != NULL) {
			vrele(epp->ep_interp);
			epp->ep_interp = NULL;
		}
		epp->ep_pax_flags = 0;

		/* make sure the first "interesting" error code is saved. */
		if (error == ENOEXEC)
			error = newerror;

		if (epp->ep_flags & EXEC_DESTR)
			/* Error from "#!" code, tidied up by recursive call */
			return error;
	}

	/* not found, error */

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

#if NVERIEXEC > 0 || defined(PAX_SEGVGUARD)
bad2:
#endif
	/*
	 * close and release the vnode, restore the old one, free the
	 * pathname buf, and punt.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(vp, FREAD, l->l_cred);
	vput(vp);
	return error;

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	vput(vp);	/* was still locked */
	return error;
}

#ifdef __MACHINE_STACK_GROWS_UP
#define STACK_PTHREADSPACE NBPG
#else
#define STACK_PTHREADSPACE 0
#endif

static int
execve_fetch_element(char * const *array, size_t index, char **value)
{
	return copyin(array + index, value, sizeof(*value));
}
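
/*
 * fetch_element exists so that a compat layer can walk an array of
 * 32-bit user pointers instead of native ones.  A minimal sketch of
 * such a variant (the name and exact shape of the real netbsd32
 * version are assumptions, not copied from it):
 *
 *	static int
 *	fetch_element32(char * const *array, size_t index, char **value)
 *	{
 *		uint32_t p32;
 *		int error;
 *
 *		error = copyin((const uint32_t *)array + index, &p32,
 *		    sizeof(p32));
 *		if (error == 0)
 *			*value = (char *)(uintptr_t)p32;
 *		return error;
 *	}
 */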

/*
 * exec system call
 */
int
sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *) path;
		syscallarg(char * const *) argp;
		syscallarg(char * const *) envp;
	} */

	return execve1(l, true, SCARG(uap, path), -1, SCARG(uap, argp),
	    SCARG(uap, envp), execve_fetch_element);
}

int
sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(char * const *) argp;
		syscallarg(char * const *) envp;
	} */

	return execve1(l, false, NULL, SCARG(uap, fd), SCARG(uap, argp),
	    SCARG(uap, envp), execve_fetch_element);
}
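
/*
 * Both entry points funnel into execve1(); they differ only in whether
 * the image is named by a path or by an already-open file descriptor.
 * Illustrative userland use of the descriptor form (not kernel code):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/bin/echo", O_RDONLY);
 *	char *argv[] = { "echo", "hello", NULL };
 *	char *envp[] = { NULL };
 *	if (fd != -1)
 *		fexecve(fd, argv, envp);	// returns only on error
 */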

/*
 * Load modules to try to execute an image that we do not understand.
 * If no execsw entries are present, we load those likely to be needed
 * in order to run native images only.  Otherwise, we autoload all
 * possible modules that could let us run the binary.  XXX lame
 */
static void
exec_autoload(void)
{
#ifdef MODULAR
	static const char * const native[] = {
		"exec_elf32",
		"exec_elf64",
		"exec_script",
		NULL
	};
	static const char * const compat[] = {
		"exec_elf32",
		"exec_elf64",
		"exec_script",
		"exec_aout",
		"exec_coff",
		"exec_ecoff",
		"compat_aoutm68k",
		"compat_netbsd32",
#if 0
		"compat_linux",
		"compat_linux32",
#endif
		"compat_sunos",
		"compat_sunos32",
		"compat_ultrix",
		NULL
	};
	char const * const *list;
	int i;

	list = nexecs == 0 ? native : compat;
	for (i = 0; list[i] != NULL; i++) {
		if (module_autoload(list[i], MODULE_CLASS_EXEC) != 0) {
			continue;
		}
		yield();
	}
#endif
}

/*
 * Copy the user- or kernel-supplied upath into the allocated path buffer
 * pbp, making it absolute by prepending the current working directory if
 * it is not already.  If offs is supplied, it will contain the offset at
 * which the original copy of upath starts within the result.
 */
int
exec_makepathbuf(struct lwp *l, const char *upath, enum uio_seg seg,
    struct pathbuf **pbp, size_t *offs)
{
	char *path, *bp;
	size_t len, tlen;
	int error;
	struct cwdinfo *cwdi;

	path = PNBUF_GET();
	if (seg == UIO_SYSSPACE) {
		error = copystr(upath, path, MAXPATHLEN, &len);
	} else {
		error = copyinstr(upath, path, MAXPATHLEN, &len);
	}
	if (error)
		goto err;

	if (path[0] == '/') {
		if (offs)
			*offs = 0;
		goto out;
	}

	len++;
	if (len + 1 >= MAXPATHLEN) {
		error = SET_ERROR(ENAMETOOLONG);
		goto err;
	}
	bp = path + MAXPATHLEN - len;
	memmove(bp, path, len);
	*(--bp) = '/';

	cwdi = l->l_proc->p_cwdi;
	rw_enter(&cwdi->cwdi_lock, RW_READER);
	error = getcwd_common(cwdi->cwdi_cdir, NULL, &bp, path, MAXPATHLEN / 2,
	    GETCWD_CHECK_ACCESS, l);
	rw_exit(&cwdi->cwdi_lock);

	if (error)
		goto err;
	tlen = path + MAXPATHLEN - bp;

	memmove(path, bp, tlen);
	path[tlen - 1] = '\0';
	if (offs)
		*offs = tlen - len;
out:
	*pbp = pathbuf_assimilate(path);
	return 0;
err:
	PNBUF_PUT(path);
	return error;
}
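
/*
 * Example of the offs contract above (illustrative): with a current
 * working directory of "/usr/bin" and upath "make", the pathbuf ends up
 * holding "/usr/bin/make" and *offs is 9, so pathstring + offs points
 * back at the caller-supplied "make".  For an absolute upath, *offs is 0.
 */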

vaddr_t
exec_vm_minaddr(vaddr_t va_min)
{
	/*
	 * Increase va_min if we don't want NULL to be mappable by the
	 * process.
	 */
#define VM_MIN_GUARD	PAGE_SIZE
	if (user_va0_disable && (va_min < VM_MIN_GUARD))
		return VM_MIN_GUARD;
	return va_min;
}
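
/*
 * Example (illustrative): with a 4096-byte PAGE_SIZE and user_va0_disable
 * set, an architecture whose VM_MIN_ADDRESS is 0 has its minimum mappable
 * user address bumped to 0x1000, so the NULL page can never be mapped;
 * with user_va0_disable clear, the architecture's own minimum is kept.
 */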

static int
execve_loadvm(struct lwp *l, bool has_path, const char *path, int fd,
    char * const *args, char * const *envs,
    execve_fetch_element_t fetch_element,
    struct execve_data * restrict data)
{
	struct exec_package * const epp = &data->ed_pack;
	int error;
	struct proc *p;
	char *dp;
	u_int modgen;

	KASSERT(data != NULL);

	p = l->l_proc;
	modgen = 0;

	SDT_PROBE(proc, kernel, , exec, path, 0, 0, 0, 0);

	/*
	 * Check if we have exceeded our number of processes limit.
	 * This is so that we handle the case where a root daemon
	 * forked, ran setuid to become the desired user and is trying
	 * to exec. The obvious place to do the reference counting check
	 * is setuid(), but we don't do the reference counting check there
	 * like other OS's do because then all the programs that use setuid()
	 * must be modified to check the return code of setuid() and exit().
	 * It is dangerous to make setuid() fail, because it fails open and
	 * the program will continue to run as root. If we make it succeed
	 * and return an error code, again we are not enforcing the limit.
	 * The best place to enforce the limit is here, when the process tries
	 * to execute a new image, because eventually the process will need
	 * to call exec in order to do something useful.
	 */
retry:
	if (p->p_flag & PK_SUGID) {
		if (kauth_authorize_process(l->l_cred, KAUTH_PROCESS_RLIMIT,
		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
		    &p->p_rlimit[RLIMIT_NPROC],
		    KAUTH_ARG(RLIMIT_NPROC)) != 0 &&
		    chgproccnt(kauth_cred_getuid(l->l_cred), 0) >
		    p->p_rlimit[RLIMIT_NPROC].rlim_cur)
			return SET_ERROR(EAGAIN);
	}

	/*
	 * Drain existing references and forbid new ones.  The process
	 * should be left alone until we're done here.  This is necessary
	 * to avoid race conditions - e.g. in ptrace() - that might allow
	 * a local user to illicitly obtain elevated privileges.
	 */
	rw_enter(&p->p_reflock, RW_WRITER);

	if (has_path) {
		size_t offs;
		/*
		 * Init the namei data to point at the user's program name.
		 * This is done here rather than in check_exec(), so that it's
		 * possible to override these settings if any of the makecmd/probe
		 * functions call check_exec() recursively - for example,
		 * see exec_script_makecmds().
		 */
		if ((error = exec_makepathbuf(l, path, UIO_USERSPACE,
		    &data->ed_pathbuf, &offs)) != 0)
			goto clrflg;
		data->ed_pathstring = pathbuf_stringcopy_get(data->ed_pathbuf);
		epp->ep_kname = data->ed_pathstring + offs;
		data->ed_resolvedname = PNBUF_GET();
		epp->ep_resolvedname = data->ed_resolvedname;
		epp->ep_xfd = -1;
	} else {
		data->ed_pathbuf = pathbuf_assimilate(strcpy(PNBUF_GET(), "/"));
		data->ed_pathstring = pathbuf_stringcopy_get(data->ed_pathbuf);
		epp->ep_kname = "*fexecve*";
		data->ed_resolvedname = NULL;
		epp->ep_resolvedname = NULL;
		epp->ep_xfd = fd;
	}


	/*
	 * initialize the fields of the exec package.
	 */
	epp->ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP);
	epp->ep_hdrlen = exec_maxhdrsz;
	epp->ep_hdrvalid = 0;
	epp->ep_emul_arg = NULL;
	epp->ep_emul_arg_free = NULL;
	memset(&epp->ep_vmcmds, 0, sizeof(epp->ep_vmcmds));
	epp->ep_vap = &data->ed_attr;
	epp->ep_flags = (p->p_flag & PK_32) ? EXEC_FROM32 : 0;
	MD_TOPDOWN_INIT(epp);
	epp->ep_emul_root = NULL;
	epp->ep_interp = NULL;
	epp->ep_esch = NULL;
	epp->ep_pax_flags = 0;
	memset(epp->ep_machine_arch, 0, sizeof(epp->ep_machine_arch));

	rw_enter(&exec_lock, RW_READER);

	/* see if we can run it. */
	if ((error = check_exec(l, epp, data->ed_pathbuf,
	    &data->ed_resolvedname)) != 0) {
		if (error != ENOENT && error != EACCES && error != ENOEXEC) {
			DPRINTF(("%s: check exec failed for %s, error %d\n",
			    __func__, epp->ep_kname, error));
		}
		goto freehdr;
	}

	/* allocate an argument buffer */
	data->ed_argp = pool_get(&exec_pool, PR_WAITOK);
	KASSERT(data->ed_argp != NULL);
	dp = data->ed_argp;

	if ((error = copyinargs(data, args, envs, fetch_element, &dp)) != 0) {
		goto bad;
	}

	/*
	 * Calculate the new stack size.
	 */

#ifdef __MACHINE_STACK_GROWS_UP
/*
 * copyargs() fills argc/argv/envp from the lower address even on
 * __MACHINE_STACK_GROWS_UP machines.  Reserve a few words just below the SP
 * so that _rtld() can use them.
 */
#define	RTLD_GAP	32
#else
#define	RTLD_GAP	0
#endif

	const size_t argenvstrlen = (char *)ALIGN(dp) - data->ed_argp;

	data->ed_argslen = calcargs(data, argenvstrlen);

	const size_t len = calcstack(data, pax_aslr_stack_gap(epp) + RTLD_GAP);

	if (len > epp->ep_ssize) {
		/* in effect, compare to initial limit */
		DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len));
		error = SET_ERROR(ENOMEM);
		goto bad;
	}
	/* adjust "active stack depth" for process VSZ */
	epp->ep_ssize = len;

	return 0;

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&epp->ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (epp->ep_flags & EXEC_HASFD) {
		epp->ep_flags &= ~EXEC_HASFD;
		fd_close(epp->ep_fd);
	}
	/* close and put the exec'd file */
	vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(epp->ep_vp, FREAD, l->l_cred);
	vput(epp->ep_vp);
	pool_put(&exec_pool, data->ed_argp);

freehdr:
	kmem_free(epp->ep_hdr, epp->ep_hdrlen);
	if (epp->ep_emul_root != NULL)
		vrele(epp->ep_emul_root);
	if (epp->ep_interp != NULL)
		vrele(epp->ep_interp);

	rw_exit(&exec_lock);

	exec_path_free(data);

clrflg:
	rw_exit(&p->p_reflock);

	if (modgen != module_gen && error == ENOEXEC) {
		modgen = module_gen;
		exec_autoload();
		goto retry;
	}

	SDT_PROBE(proc, kernel, , exec__failure, error, 0, 0, 0, 0);
	return error;
}

static int
execve_dovmcmds(struct lwp *l, struct execve_data * restrict data)
{
	struct exec_package * const epp = &data->ed_pack;
	struct proc *p = l->l_proc;
	struct exec_vmcmd *base_vcp;
	int error = 0;
	size_t i;

	/* record proc's vnode, for use by procfs and others */
	if (p->p_textvp)
		vrele(p->p_textvp);
	vref(epp->ep_vp);
	p->p_textvp = epp->ep_vp;

	/* create the new process's VM space by running the vmcmds */
	KASSERTMSG(epp->ep_vmcmds.evs_used != 0, "%s: no vmcmds", __func__);

#ifdef TRACE_EXEC
	DUMPVMCMDS(epp, 0, 0);
#endif

	base_vcp = NULL;

	for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &epp->ep_vmcmds.evs_cmds[i];
		if (vcp->ev_flags & VMCMD_RELATIVE) {
			KASSERTMSG(base_vcp != NULL,
			    "%s: relative vmcmd with no base", __func__);
			KASSERTMSG((vcp->ev_flags & VMCMD_BASE) == 0,
			    "%s: illegal base & relative vmcmd", __func__);
			vcp->ev_addr += base_vcp->ev_addr;
		}
		error = (*vcp->ev_proc)(l, vcp);
		if (error)
			DUMPVMCMDS(epp, i, error);
		if (vcp->ev_flags & VMCMD_BASE)
			base_vcp = vcp;
	}

	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&epp->ep_vmcmds);

	vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(epp->ep_vp, FREAD, l->l_cred);
	vput(epp->ep_vp);

	/* if an error happened, deallocate and punt */
	if (error != 0) {
		DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error));
	}
	return error;
}

static void
execve_free_data(struct execve_data *data)
{
	struct exec_package * const epp = &data->ed_pack;

	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&epp->ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (epp->ep_flags & EXEC_HASFD) {
		epp->ep_flags &= ~EXEC_HASFD;
		fd_close(epp->ep_fd);
	}

	/* close and put the exec'd file */
	vn_lock(epp->ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(epp->ep_vp, FREAD, curlwp->l_cred);
	vput(epp->ep_vp);
	pool_put(&exec_pool, data->ed_argp);

	kmem_free(epp->ep_hdr, epp->ep_hdrlen);
	if (epp->ep_emul_root != NULL)
		vrele(epp->ep_emul_root);
	if (epp->ep_interp != NULL)
		vrele(epp->ep_interp);

	exec_path_free(data);
}

static void
pathexec(struct proc *p, const char *resolvedname)
{
	/* set command name & other accounting info */
	const char *cmdname;

	if (resolvedname == NULL) {
		cmdname = "*fexecve*";
		resolvedname = "/";
	} else {
		cmdname = strrchr(resolvedname, '/') + 1;
	}
	KASSERTMSG(resolvedname[0] == '/', "bad resolvedname `%s'",
	    resolvedname);

	strlcpy(p->p_comm, cmdname, sizeof(p->p_comm));

	kmem_strfree(p->p_path);
	p->p_path = kmem_strdupsize(resolvedname, NULL, KM_SLEEP);
}

/* XXX elsewhere */
static int
credexec(struct lwp *l, struct execve_data *data)
{
	struct proc *p = l->l_proc;
	struct vattr *attr = &data->ed_attr;
	int error;

	/*
	 * Deal with set[ug]id.  MNT_NOSUID has already been used to disable
	 * s[ug]id.  It's OK to check for PSL_TRACED here as we have blocked
	 * out additional references on the process for the moment.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 &&

	    (((attr->va_mode & S_ISUID) != 0 &&
	      kauth_cred_geteuid(l->l_cred) != attr->va_uid) ||

	     ((attr->va_mode & S_ISGID) != 0 &&
	      kauth_cred_getegid(l->l_cred) != attr->va_gid))) {
		/*
		 * Mark the process as SUGID before we do
		 * anything that might block.
		 */
		proc_crmod_enter();
		proc_crmod_leave(NULL, NULL, true);
		if (data->ed_argc == 0) {
			DPRINTF((
			    "%s: not executing set[ug]id binary with no args\n",
			    __func__));
			return SET_ERROR(EINVAL);
		}

		/* Make sure file descriptors 0..2 are in use. */
		if ((error = fd_checkstd()) != 0) {
			DPRINTF(("%s: fdcheckstd failed %d\n",
			    __func__, error));
			return error;
		}

		/*
		 * Copy the credential so other references don't see our
		 * changes.
		 */
		l->l_cred = kauth_cred_copy(l->l_cred);
#ifdef KTRACE
		/*
		 * If the persistent trace flag isn't set, turn off.
		 */
		if (p->p_tracep) {
			mutex_enter(&ktrace_lock);
			if (!(p->p_traceflag & KTRFAC_PERSISTENT))
				ktrderef(p);
			mutex_exit(&ktrace_lock);
		}
#endif
		if (attr->va_mode & S_ISUID)
			kauth_cred_seteuid(l->l_cred, attr->va_uid);
		if (attr->va_mode & S_ISGID)
			kauth_cred_setegid(l->l_cred, attr->va_gid);
	} else {
		if (kauth_cred_geteuid(l->l_cred) ==
		    kauth_cred_getuid(l->l_cred) &&
		    kauth_cred_getegid(l->l_cred) ==
		    kauth_cred_getgid(l->l_cred))
			p->p_flag &= ~PK_SUGID;
	}

	/*
	 * Copy the credential so other references don't see our changes.
	 * Test to see if this is necessary first, since in the common case
	 * we won't need a private reference.
	 */
	if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) ||
	    kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) {
		l->l_cred = kauth_cred_copy(l->l_cred);
		kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred));
		kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred));
	}

	/* Update the master credentials. */
	if (l->l_cred != p->p_cred) {
		kauth_cred_t ocred;
		mutex_enter(p->p_lock);
		ocred = p->p_cred;
		p->p_cred = kauth_cred_hold(l->l_cred);
		mutex_exit(p->p_lock);
		kauth_cred_free(ocred);
	}

	return 0;
}
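
/*
 * Example walk through credexec() above (illustrative): an untraced
 * process with euid 1000 exec'ing a root-owned S_ISUID binary takes the
 * first branch: PK_SUGID is set, fds 0..2 are forced open, ktrace is
 * dropped unless persistent, and a private cred gets euid 0; the
 * svuid/svgid block then syncs the saved IDs to the new effective ones.
 */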

static void
emulexec(struct lwp *l, struct exec_package *epp)
{
	struct proc *p = l->l_proc;

	/* The emulation root will usually have been found when we looked
	 * for the ELF interpreter (or similar); if not, look now. */
	if (epp->ep_esch->es_emul->e_path != NULL &&
	    epp->ep_emul_root == NULL)
		emul_find_root(l, epp);

	/* Any old emulation root got removed by fdcloseexec */
	rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER);
	p->p_cwdi->cwdi_edir = epp->ep_emul_root;
	rw_exit(&p->p_cwdi->cwdi_lock);
	epp->ep_emul_root = NULL;
	if (epp->ep_interp != NULL)
		vrele(epp->ep_interp);

	/*
	 * Call emulation specific exec hook. This can set up per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing a process of a different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of the new emulation.  If the emulation
	 * is the same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (p->p_emul && p->p_emul->e_proc_exit
	    && p->p_emul != epp->ep_esch->es_emul)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (epp->ep_esch->es_emul->e_proc_exec)
		(*epp->ep_esch->es_emul->e_proc_exec)(p, epp);

	/* update p_emul, the old value is no longer needed */
	p->p_emul = epp->ep_esch->es_emul;

	/* ...and the same for p_execsw */
	p->p_execsw = epp->ep_esch;

#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif
	ktremul();
}

static int
execve_runproc(struct lwp *l, struct execve_data * restrict data,
    bool no_local_exec_lock, bool is_spawn)
{
	struct exec_package * const epp = &data->ed_pack;
	int error = 0;
	struct proc *p;
	struct vmspace *vm;

	/*
	 * In case of a posix_spawn operation, the child doing the exec
	 * might not hold the reader lock on exec_lock, but the parent
	 * will do this instead.
	 */
	KASSERT(no_local_exec_lock || rw_lock_held(&exec_lock));
	KASSERT(!no_local_exec_lock || is_spawn);
	KASSERT(data != NULL);

	p = l->l_proc;

	/* Get rid of other LWPs. */
	if (p->p_nlwps > 1) {
		mutex_enter(p->p_lock);
		exit_lwps(l);
		mutex_exit(p->p_lock);
	}
	KDASSERT(p->p_nlwps == 1);

	/*
	 * All of the other LWPs got rid of their robust futexes
	 * when they exited above, but we might still have some
	 * to dispose of.  Do that now.
	 */
	if (__predict_false(l->l_robust_head != 0)) {
		futex_release_all_lwp(l);
		/*
		 * Since this LWP will live on with a different
		 * program image, we need to clear the robust
		 * futex list pointer here.
		 */
		l->l_robust_head = 0;
	}

	/* Destroy any lwpctl info. */
	if (p->p_lwpctl != NULL)
		lwp_ctl_exit();

	/* Remove POSIX timers */
	ptimers_free(p, TIMERS_POSIX);

	/* Set the PaX flags. */
	pax_set_flags(epp, p);

	/*
	 * Do whatever is necessary to prepare the address space
	 * for remapping.  Note that this might replace the current
	 * vmspace with another!
	 *
	 * vfork(): do not touch any user space data in the new child
	 * until we have awoken the parent below, or it will defeat
	 * lazy pmap switching (on x86).
	 */
	if (is_spawn)
		uvmspace_spawn(l, epp->ep_vm_minaddr,
		    epp->ep_vm_maxaddr,
		    epp->ep_flags & EXEC_TOPDOWN_VM);
	else
		uvmspace_exec(l, epp->ep_vm_minaddr,
		    epp->ep_vm_maxaddr,
		    epp->ep_flags & EXEC_TOPDOWN_VM);
	vm = p->p_vmspace;

	vm->vm_taddr = (void *)epp->ep_taddr;
	vm->vm_tsize = btoc(epp->ep_tsize);
	vm->vm_daddr = (void*)epp->ep_daddr;
	vm->vm_dsize = btoc(epp->ep_dsize);
	vm->vm_ssize = btoc(epp->ep_ssize);
	vm->vm_issize = 0;
	vm->vm_maxsaddr = (void *)epp->ep_maxsaddr;
	vm->vm_minsaddr = (void *)epp->ep_minsaddr;

	pax_aslr_init_vm(l, vm, epp);

	cwdexec(p);
	fd_closeexec();		/* handle close on exec */

	if (__predict_false(ktrace_on))
		fd_ktrexecfd();

	execsigs(p);		/* reset caught signals */

	mutex_enter(p->p_lock);
	l->l_ctxlink = NULL;	/* reset ucontext link */
	p->p_acflag &= ~AFORK;
	p->p_flag |= PK_EXEC;
	mutex_exit(p->p_lock);

	error = credexec(l, data);
	if (error)
		goto exec_abort;

#if defined(__HAVE_RAS)
	/*
	 * Remove all RASs from the address space.
	 */
	ras_purgeall();
#endif

	/*
	 * Stop profiling.
	 */
	if ((p->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}

	/*
	 * It's OK to test PL_PPWAIT unlocked here, as other LWPs have
	 * exited and exec()/exit() are the only places it will be cleared.
	 *
	 * Once the parent has been awoken, curlwp may teleport to a new CPU
	 * in sched_vforkexec(), and it's then OK to start messing with user
	 * data.  See comment above.
	 */
	if ((p->p_lflag & PL_PPWAIT) != 0) {
		bool samecpu;
		lwp_t *lp;

		mutex_enter(&proc_lock);
		lp = p->p_vforklwp;
		p->p_vforklwp = NULL;
		l->l_lwpctl = NULL;	/* was on loan from blocked parent */

		/* Clear flags after cv_broadcast() (scheduler needs them). */
		p->p_lflag &= ~PL_PPWAIT;
		lp->l_vforkwaiting = false;

		/* If parent is still on same CPU, teleport curlwp elsewhere. */
		samecpu = (lp->l_cpu == curlwp->l_cpu);
		cv_broadcast(&lp->l_waitcv);
		mutex_exit(&proc_lock);

		/* Give the parent its CPU back - find a new home. */
		KASSERT(!is_spawn);
		sched_vforkexec(l, samecpu);
	}

	/* Now map address space. */
	error = execve_dovmcmds(l, data);
	if (error != 0)
		goto exec_abort;

	pathexec(p, epp->ep_resolvedname);

	char * const newstack = STACK_GROW(vm->vm_minsaddr, epp->ep_ssize);

	error = copyoutargs(data, l, newstack);
	if (error != 0)
		goto exec_abort;

	doexechooks(p);

	/*
	 * Set initial SP at the top of the stack.
	 *
	 * Note that on machines where stack grows up (e.g. hppa), SP points to
	 * the end of arg/env strings.  Userland guesses the address of argc
	 * via ps_strings::ps_argvstr.
	 */

	/* Setup new registers and do misc. setup. */
	(*epp->ep_esch->es_emul->e_setregs)(l, epp, (vaddr_t)newstack);
	if (epp->ep_esch->es_setregs)
		(*epp->ep_esch->es_setregs)(l, epp, (vaddr_t)newstack);

	/* Provide a consistent LWP private setting */
	(void)lwp_setprivate(l, NULL);

	/* Discard all PCU state; need to start fresh */
	pcu_discard_all(l);

	/* map the process's signal trampoline code */
	if ((error = exec_sigcode_map(p, epp->ep_esch->es_emul)) != 0) {
		DPRINTF(("%s: map sigcode failed %d\n", __func__, error));
		goto exec_abort;
	}

	pool_put(&exec_pool, data->ed_argp);

	/*
	 * Notify anyone who might care that we've exec'd.
	 *
	 * This is slightly racy; someone could sneak in and
	 * attach a knote after we've decided not to notify,
	 * or vice-versa, but that's not particularly bothersome.
	 * knote_proc_exec() will acquire p->p_lock as needed.
	 */
	if (!SLIST_EMPTY(&p->p_klist)) {
		knote_proc_exec(p);
	}

	kmem_free(epp->ep_hdr, epp->ep_hdrlen);

	SDT_PROBE(proc, kernel, , exec__success, epp->ep_kname, 0, 0, 0, 0);

	emulexec(l, epp);

	/* Allow new references from the debugger/procfs. */
	rw_exit(&p->p_reflock);
	if (!no_local_exec_lock)
		rw_exit(&exec_lock);

	mutex_enter(&proc_lock);

	/* posix_spawn(3) reports a single event with implied exec(3) */
	if ((p->p_slflag & PSL_TRACED) && !is_spawn) {
		mutex_enter(p->p_lock);
		eventswitch(TRAP_EXEC, 0, 0);
		mutex_enter(&proc_lock);
	}

	if (p->p_sflag & PS_STOPEXEC) {
		ksiginfoq_t kq;

		KASSERT(l->l_blcnt == 0);
		p->p_pptr->p_nstopchild++;
		p->p_waited = 0;
		mutex_enter(p->p_lock);
		ksiginfo_queue_init(&kq);
		sigclearall(p, &contsigmask, &kq);
		lwp_lock(l);
		l->l_stat = LSSTOP;
		p->p_stat = SSTOP;
		p->p_nrlwps--;
		lwp_unlock(l);
		mutex_exit(p->p_lock);
		mutex_exit(&proc_lock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		ksiginfo_queue_drain(&kq);
	} else {
		mutex_exit(&proc_lock);
	}

	exec_path_free(data);
#ifdef TRACE_EXEC
	DPRINTF(("%s finished\n", __func__));
#endif
	return EJUSTRETURN;

exec_abort:
	SDT_PROBE(proc, kernel, , exec__failure, error, 0, 0, 0, 0);
	rw_exit(&p->p_reflock);
	if (!no_local_exec_lock)
		rw_exit(&exec_lock);

	exec_path_free(data);

	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	if (vm != NULL) {
		uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	}

	exec_free_emul_arg(epp);
	pool_put(&exec_pool, data->ed_argp);
	kmem_free(epp->ep_hdr, epp->ep_hdrlen);
	if (epp->ep_emul_root != NULL)
		vrele(epp->ep_emul_root);
	if (epp->ep_interp != NULL)
		vrele(epp->ep_interp);

	/* Acquire the sched-state mutex (exit1() will release it). */
	if (!is_spawn) {
		mutex_enter(p->p_lock);
		exit1(l, error, SIGABRT);
	}

	return error;
}

int
execve1(struct lwp *l, bool has_path, const char *path, int fd,
    char * const *args, char * const *envs,
    execve_fetch_element_t fetch_element)
{
	struct execve_data data;
	int error;

	error = execve_loadvm(l, has_path, path, fd, args, envs, fetch_element,
	    &data);
	if (error)
		return error;
	error = execve_runproc(l, &data, false, false);
	return error;
}

static size_t
fromptrsz(const struct exec_package *epp)
{
	return (epp->ep_flags & EXEC_FROM32) ? sizeof(int) : sizeof(char *);
}

static size_t
ptrsz(const struct exec_package *epp)
{
	return (epp->ep_flags & EXEC_32) ? sizeof(int) : sizeof(char *);
}
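
/*
 * Two pointer sizes are in play across an exec that switches between
 * 32-bit and 64-bit images: fromptrsz() describes the argv/envp arrays
 * of the calling image (EXEC_FROM32), ptrsz() the layout built for the
 * new image (EXEC_32).  For example (illustrative), a 32-bit process
 * exec'ing a 64-bit binary on a 64-bit kernel has fromptrsz() == 4 but
 * ptrsz() == 8, so EXEC_SKIPARG in copyinargs() below advances the user
 * argv by 4 bytes while calcargs() sizes the new stack with 8-byte slots.
 */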

static size_t
calcargs(struct execve_data * restrict data, const size_t argenvstrlen)
{
	struct exec_package * const epp = &data->ed_pack;

	const size_t nargenvptrs =
	    1 +			/* long argc */
	    data->ed_argc +	/* char *argv[] */
	    1 +			/* \0 */
	    data->ed_envc +	/* char *env[] */
	    1;			/* \0 */

	return (nargenvptrs * ptrsz(epp))	/* pointers */
	    + argenvstrlen			/* strings */
	    + epp->ep_esch->es_arglen;		/* auxinfo */
}

static size_t
calcstack(struct execve_data * restrict data, const size_t gaplen)
{
	struct exec_package * const epp = &data->ed_pack;

	data->ed_szsigcode = epp->ep_esch->es_emul->e_esigcode -
	    epp->ep_esch->es_emul->e_sigcode;

	data->ed_ps_strings_sz = (epp->ep_flags & EXEC_32) ?
	    sizeof(struct ps_strings32) : sizeof(struct ps_strings);

	const size_t sigcode_psstr_sz =
	    data->ed_szsigcode +	/* sigcode */
	    data->ed_ps_strings_sz +	/* ps_strings */
	    STACK_PTHREADSPACE;		/* pthread space */

	const size_t stacklen =
	    data->ed_argslen +
	    gaplen +
	    sigcode_psstr_sz;

	/* make the stack "safely" aligned */
	return STACK_LEN_ALIGN(stacklen, STACK_ALIGNBYTES);
}
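
/*
 * Worked sketch of the sizing above (numbers purely illustrative): with
 * ed_argslen = 0x2000, an ASLR gap of 0x1000, szsigcode = 0x30,
 * ps_strings_sz = 0x20 and STACK_PTHREADSPACE = 0, the raw stacklen is
 * 0x3050, which STACK_LEN_ALIGN() then rounds up by STACK_ALIGNBYTES
 * before execve_loadvm() compares the result against ep_ssize.
 */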

static int
copyoutargs(struct execve_data * restrict data, struct lwp *l,
    char * const newstack)
{
	struct exec_package * const epp = &data->ed_pack;
	struct proc *p = l->l_proc;
	int error;

	memset(&data->ed_arginfo, 0, sizeof(data->ed_arginfo));

	/* remember information about the process */
	data->ed_arginfo.ps_nargvstr = data->ed_argc;
	data->ed_arginfo.ps_nenvstr = data->ed_envc;

	/*
	 * Allocate the stack address passed to the newly execve()'ed process.
	 *
	 * The new stack address will be set to the SP (stack pointer) register
	 * in setregs().
	 */

	char *newargs = STACK_ALLOC(
	    STACK_SHRINK(newstack, data->ed_argslen), data->ed_argslen);

	error = (*epp->ep_esch->es_copyargs)(l, epp,
	    &data->ed_arginfo, &newargs, data->ed_argp);

	if (error) {
		DPRINTF(("%s: copyargs failed %d\n", __func__, error));
		return error;
	}

	error = copyoutpsstrs(data, p);
	if (error != 0)
		return error;

	return 0;
}

static int
copyoutpsstrs(struct execve_data * restrict data, struct proc *p)
{
	struct exec_package * const epp = &data->ed_pack;
	struct ps_strings32 arginfo32;
	void *aip;
	int error;

	/* fill process ps_strings info */
	p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    STACK_PTHREADSPACE), data->ed_ps_strings_sz);

	if (epp->ep_flags & EXEC_32) {
		aip = &arginfo32;
		arginfo32.ps_argvstr = (vaddr_t)data->ed_arginfo.ps_argvstr;
		arginfo32.ps_nargvstr = data->ed_arginfo.ps_nargvstr;
		arginfo32.ps_envstr = (vaddr_t)data->ed_arginfo.ps_envstr;
		arginfo32.ps_nenvstr = data->ed_arginfo.ps_nenvstr;
	} else
		aip = &data->ed_arginfo;

	/* copy out the process's ps_strings structure */
	if ((error = copyout(aip, (void *)p->p_psstrp, data->ed_ps_strings_sz))
	    != 0) {
		DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n",
		    __func__, aip, (void *)p->p_psstrp, data->ed_ps_strings_sz));
		return error;
	}

	return 0;
}

static int
copyinargs(struct execve_data * restrict data, char * const *args,
    char * const *envs, execve_fetch_element_t fetch_element, char **dpp)
{
	struct exec_package * const epp = &data->ed_pack;
	char *dp;
	size_t i;
	int error;

	dp = *dpp;

	data->ed_argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (epp->ep_flags & EXEC_HASARGL) {
		struct exec_fakearg *fa = epp->ep_fa;

		while (fa->fa_arg != NULL) {
			const size_t maxlen = ARG_MAX - (dp - data->ed_argp);
			size_t len;

			len = strlcpy(dp, fa->fa_arg, maxlen);
			/* Count NUL into len. */
			if (len < maxlen)
				len++;
			else {
				while (fa->fa_arg != NULL) {
					kmem_free(fa->fa_arg, fa->fa_len);
					fa++;
				}
				kmem_free(epp->ep_fa, epp->ep_fa_len);
				epp->ep_flags &= ~EXEC_HASARGL;
				return SET_ERROR(E2BIG);
			}
			ktrexecarg(fa->fa_arg, len - 1);
			dp += len;

			kmem_free(fa->fa_arg, fa->fa_len);
			fa++;
			data->ed_argc++;
		}
		kmem_free(epp->ep_fa, epp->ep_fa_len);
		epp->ep_flags &= ~EXEC_HASARGL;
	}

	/*
	 * Read and count argument strings from user.
	 */

	if (args == NULL) {
		DPRINTF(("%s: null args\n", __func__));
		return SET_ERROR(EINVAL);
	}
	if (epp->ep_flags & EXEC_SKIPARG)
		args = (const void *)((const char *)args + fromptrsz(epp));
	i = 0;
	error = copyinargstrs(data, args, fetch_element, &dp, &i, ktr_execarg);
	if (error != 0) {
		DPRINTF(("%s: copyin arg %d\n", __func__, error));
		return error;
	}
	data->ed_argc += i;

	/*
	 * Read and count environment strings from user.
	 */

	data->ed_envc = 0;
	/* environment need not be there */
	if (envs == NULL)
		goto done;
	i = 0;
	error = copyinargstrs(data, envs, fetch_element, &dp, &i, ktr_execenv);
	if (error != 0) {
		DPRINTF(("%s: copyin env %d\n", __func__, error));
		return error;
	}
	data->ed_envc += i;

done:
	*dpp = dp;

	return 0;
}

static int
copyinargstrs(struct execve_data * restrict data, char * const *strs,
    execve_fetch_element_t fetch_element, char **dpp, size_t *ip,
    void (*ktr)(const void *, size_t))
{
	char *dp, *sp;
	size_t i;
	int error;

	dp = *dpp;

	i = 0;
	while (1) {
		const size_t maxlen = ARG_MAX - (dp - data->ed_argp);
		size_t len;

		if ((error = (*fetch_element)(strs, i, &sp)) != 0) {
			return error;
		}
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, maxlen, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = SET_ERROR(E2BIG);
			return error;
		}
		if (__predict_false(ktrace_on))
			(*ktr)(dp, len - 1);
		dp += len;
		i++;
	}

	*dpp = dp;
	*ip = i;

	return 0;
}

/*
 * Copy argv and env strings from kernel buffer (argp) to the new stack.
 * Those strings are located just after auxinfo.
 */
int
copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo,
    char **stackp, void *argp)
{
	char **cpp, *dp, *sp;
	size_t len;
	void *nullp;
	long argc, envc;
	int error;

	cpp = (char **)*stackp;
	nullp = NULL;
	argc = arginfo->ps_nargvstr;
	envc = arginfo->ps_nenvstr;

	/* argc on stack is long */
	CTASSERT(sizeof(*cpp) == sizeof(argc));

	dp = (char *)(cpp +
	    1 +				/* long argc */
	    argc +			/* char *argv[] */
	    1 +				/* \0 */
	    envc +			/* char *env[] */
	    1) +			/* \0 */
	    pack->ep_esch->es_arglen;	/* auxinfo */
	sp = argp;

	if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(argc));
		return error;
	}

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp;	/* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len) {
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) {
			COPYPRINTF("", cpp - 1, sizeof(dp));
			return error;
		}
		if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) {
			COPYPRINTF("str", dp, (size_t)ARG_MAX);
			return error;
		}
	}

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(nullp));
		return error;
	}

	arginfo->ps_envstr = cpp;	/* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len) {
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) {
			COPYPRINTF("", cpp - 1, sizeof(dp));
			return error;
		}
		if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) {
			COPYPRINTF("str", dp, (size_t)ARG_MAX);
			return error;
		}

	}

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(nullp));
		return error;
	}

	*stackp = (char *)cpp;
	return 0;
}
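
/*
 * Layout built by copyargs() above, from *stackp towards higher
 * addresses (illustrative picture):
 *
 *	+----------------------+  <- *stackp on entry (cpp)
 *	| long argc            |
 *	| char *argv[argc]     |
 *	| NULL                 |
 *	| char *envp[envc]     |
 *	| NULL                 |  <- *stackp on return
 *	| auxinfo              |  (es_arglen bytes)
 *	| argv/env strings     |  <- dp, copied out from argp
 *	+----------------------+
 */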


/*
 * Add execsw[] entries.
 */
int
exec_add(struct execsw *esp, int count)
{
	struct exec_entry *it;
	int i, error = 0;

	if (count == 0) {
		return 0;
	}

	/* Check for duplicates. */
	rw_enter(&exec_lock, RW_WRITER);
	for (i = 0; i < count; i++) {
		LIST_FOREACH(it, &ex_head, ex_list) {
			/* assume unique (makecmds, probe_func, emulation) */
			if (it->ex_sw->es_makecmds == esp[i].es_makecmds &&
			    it->ex_sw->u.elf_probe_func ==
			    esp[i].u.elf_probe_func &&
			    it->ex_sw->es_emul == esp[i].es_emul) {
				rw_exit(&exec_lock);
				return SET_ERROR(EEXIST);
			}
		}
	}

	/* Allocate new entries. */
	for (i = 0; i < count; i++) {
		it = kmem_alloc(sizeof(*it), KM_SLEEP);
		it->ex_sw = &esp[i];
		error = exec_sigcode_alloc(it->ex_sw->es_emul);
		if (error != 0) {
			kmem_free(it, sizeof(*it));
			break;
		}
		LIST_INSERT_HEAD(&ex_head, it, ex_list);
	}
	/* If even one of them fails, remove them all. */
	if (error != 0) {
		for (i--; i >= 0; i--) {
			it = LIST_FIRST(&ex_head);
			LIST_REMOVE(it, ex_list);
			exec_sigcode_free(it->ex_sw->es_emul);
			kmem_free(it, sizeof(*it));
		}
		rw_exit(&exec_lock);
		return error;
	}

	/* update execsw[] */
	exec_init(0);
	rw_exit(&exec_lock);
	return 0;
}
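
/*
 * Typical caller (illustrative sketch of a loadable exec-format module;
 * the "myfmt" identifiers are hypothetical, not from a real module):
 *
 *	static struct execsw myfmt_execsw[] = {
 *		{
 *			.es_hdrsz = sizeof(struct myfmt_hdr),
 *			.es_makecmds = exec_myfmt_makecmds,
 *			.es_emul = &emul_netbsd,
 *			.es_prio = EXECSW_PRIO_ANY,
 *		},
 *	};
 *
 *	// from the module's MODULE_CMD_INIT handler:
 *	error = exec_add(myfmt_execsw, __arraycount(myfmt_execsw));
 *	// and from MODULE_CMD_FINI:
 *	error = exec_remove(myfmt_execsw, __arraycount(myfmt_execsw));
 */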
1872
1873 /*
1874 * Remove execsw[] entry.
1875 */
1876 int
1877 exec_remove(struct execsw *esp, int count)
1878 {
1879 struct exec_entry *it, *next;
1880 int i;
1881 const struct proclist_desc *pd;
1882 proc_t *p;
1883
1884 if (count == 0) {
1885 return 0;
1886 }
1887
1888 /* Abort if any are busy. */
1889 rw_enter(&exec_lock, RW_WRITER);
1890 for (i = 0; i < count; i++) {
1891 mutex_enter(&proc_lock);
1892 for (pd = proclists; pd->pd_list != NULL; pd++) {
1893 PROCLIST_FOREACH(p, pd->pd_list) {
1894 if (p->p_execsw == &esp[i]) {
1895 mutex_exit(&proc_lock);
1896 rw_exit(&exec_lock);
1897 return SET_ERROR(EBUSY);
1898 }
1899 }
1900 }
1901 mutex_exit(&proc_lock);
1902 }
1903
1904 /* None are busy, so remove them all. */
1905 for (i = 0; i < count; i++) {
1906 for (it = LIST_FIRST(&ex_head); it != NULL; it = next) {
1907 next = LIST_NEXT(it, ex_list);
1908 if (it->ex_sw == &esp[i]) {
1909 LIST_REMOVE(it, ex_list);
1910 exec_sigcode_free(it->ex_sw->es_emul);
1911 kmem_free(it, sizeof(*it));
1912 break;
1913 }
1914 }
1915 }
1916
1917 /* update execsw[] */
1918 exec_init(0);
1919 rw_exit(&exec_lock);
1920 return 0;
1921 }
1922
1923 /*
1924 * Initialize exec structures. If init_boot is true, also does necessary
1925 * one-time initialization (it's called from main() that way).
1926 * Once system is multiuser, this should be called with exec_lock held,
1927 * i.e. via exec_{add|remove}().
1928 */
1929 int
1930 exec_init(int init_boot)
1931 {
1932 const struct execsw **sw;
1933 struct exec_entry *ex;
1934 SLIST_HEAD(,exec_entry) first;
1935 SLIST_HEAD(,exec_entry) any;
1936 SLIST_HEAD(,exec_entry) last;
1937 int i, sz;
1938
1939 if (init_boot) {
1940 /* do one-time initializations */
1941 vaddr_t vmin = 0, vmax;
1942
1943 rw_init(&exec_lock);
1944 exec_map = uvm_km_suballoc(kernel_map, &vmin, &vmax,
1945 maxexec*NCARGS, VM_MAP_PAGEABLE, false, NULL);
1946 pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH,
1947 "execargs", &exec_palloc, IPL_NONE);
1948 pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0);
1949 } else {
1950 KASSERT(rw_write_held(&exec_lock));
1951 }
1952
1953 /* Sort each entry onto the appropriate queue. */
1954 SLIST_INIT(&first);
1955 SLIST_INIT(&any);
1956 SLIST_INIT(&last);
1957 sz = 0;
1958 LIST_FOREACH(ex, &ex_head, ex_list) {
1959 switch(ex->ex_sw->es_prio) {
1960 case EXECSW_PRIO_FIRST:
1961 SLIST_INSERT_HEAD(&first, ex, ex_slist);
1962 break;
1963 case EXECSW_PRIO_ANY:
1964 SLIST_INSERT_HEAD(&any, ex, ex_slist);
1965 break;
1966 case EXECSW_PRIO_LAST:
1967 SLIST_INSERT_HEAD(&last, ex, ex_slist);
1968 break;
1969 default:
1970 panic("%s", __func__);
1971 break;
1972 }
1973 sz++;
1974 }
1975
1976 /*
1977 * Create new execsw[]. Ensure we do not try a zero-sized
1978 * allocation.
1979 */
1980 sw = kmem_alloc(sz * sizeof(struct execsw *) + 1, KM_SLEEP);
1981 i = 0;
1982 SLIST_FOREACH(ex, &first, ex_slist) {
1983 sw[i++] = ex->ex_sw;
1984 }
1985 SLIST_FOREACH(ex, &any, ex_slist) {
1986 sw[i++] = ex->ex_sw;
1987 }
1988 SLIST_FOREACH(ex, &last, ex_slist) {
1989 sw[i++] = ex->ex_sw;
1990 }
1991
1992 /* Replace old execsw[] and free used memory. */
1993 if (execsw != NULL) {
1994 kmem_free(__UNCONST(execsw),
1995 nexecs * sizeof(struct execsw *) + 1);
1996 }
1997 execsw = sw;
1998 nexecs = sz;
1999
2000 /* Figure out the maximum size of an exec header. */
2001 exec_maxhdrsz = sizeof(int);
2002 for (i = 0; i < nexecs; i++) {
2003 if (execsw[i]->es_hdrsz > exec_maxhdrsz)
2004 exec_maxhdrsz = execsw[i]->es_hdrsz;
2005 }
2006
2007 return 0;
2008 }
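
/*
 * Illustrative sketch (editor's addition): the point of the three priority
 * queues above is the probe order of the rebuilt execsw[].  Schematically,
 * a format probe loop such as the one in check_exec() sees all
 * EXECSW_PRIO_FIRST entries, then EXECSW_PRIO_ANY, then EXECSW_PRIO_LAST:
 */
#if 0
	for (i = 0; i < nexecs; i++) {
		error = (*execsw[i]->es_makecmds)(l, epp);
		if (error == 0)
			break;	/* first matching format wins */
	}
#endif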
2009
2010 int
2011 exec_sigcode_alloc(const struct emul *e)
2012 {
2013 vaddr_t va;
2014 vsize_t sz;
2015 int error;
2016 struct uvm_object *uobj;
2017
2018 KASSERT(rw_lock_held(&exec_lock));
2019
2020 if (e == NULL || e->e_sigobject == NULL)
2021 return 0;
2022
2023 sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;
2024 if (sz == 0)
2025 return 0;
2026
2027 /*
2028 * Create a sigobject for this emulation.
2029 *
2030 * sigobject is an anonymous memory object (just like SYSV shared
2031 * memory) that we keep a permanent reference to and that we map
2032 * in all processes that need this sigcode. The creation is simple:
2033 * we create an object, add a permanent reference to it, map it into
2034 * kernel space, copy the sigcode into it, and unmap it.
2035 * We map it with PROT_READ|PROT_EXEC into the process just
2036 * the way sys_mmap() would map it.
2037 */
2038 if (*e->e_sigobject == NULL) {
2039 uobj = uao_create(sz, 0);
2040 (*uobj->pgops->pgo_reference)(uobj);
2041 va = vm_map_min(kernel_map);
2042 if ((error = uvm_map(kernel_map, &va, round_page(sz),
2043 uobj, 0, 0,
2044 UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
2045 UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
2046 printf("sigcode kernel mapping failed %d\n", error);
2047 (*uobj->pgops->pgo_detach)(uobj);
2048 return error;
2049 }
2050 memcpy((void *)va, e->e_sigcode, sz);
2051 #ifdef PMAP_NEED_PROCWR
2052 pmap_procwr(&proc0, va, sz);
2053 #endif
2054 uvm_unmap(kernel_map, va, va + round_page(sz));
2055 *e->e_sigobject = uobj;
2056 KASSERT(uobj->uo_refs == 1);
2057 } else {
2058 /* if already created, reference++ */
2059 uobj = *e->e_sigobject;
2060 (*uobj->pgops->pgo_reference)(uobj);
2061 }
2062
2063 return 0;
2064 }
2065
2066 void
2067 exec_sigcode_free(const struct emul *e)
2068 {
2069 struct uvm_object *uobj;
2070
2071 KASSERT(rw_lock_held(&exec_lock));
2072
2073 if (e == NULL || e->e_sigobject == NULL)
2074 return;
2075
2076 uobj = *e->e_sigobject;
2077 if (uobj == NULL)
2078 return;
2079
2080 if (uobj->uo_refs == 1)
2081 *e->e_sigobject = NULL; /* ours is the last reference */
2082 (*uobj->pgops->pgo_detach)(uobj);
2083 }
2084
2085 static int
2086 exec_sigcode_map(struct proc *p, const struct emul *e)
2087 {
2088 vaddr_t va;
2089 vsize_t sz;
2090 int error;
2091 struct uvm_object *uobj;
2092
2093 sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;
2094 if (e->e_sigobject == NULL || sz == 0)
2095 return 0;
2096
2097 uobj = *e->e_sigobject;
2098 if (uobj == NULL)
2099 return 0;
2100
2101 /* Just a hint to uvm_map where to put it. */
2102 va = e->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr,
2103 round_page(sz), p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
2104
2105 #ifdef __alpha__
2106 /*
2107 * Tru64 puts /sbin/loader at the end of user virtual memory,
2108 * which causes the above calculation to put the sigcode at
2109 * an invalid address. Put it just below the text instead.
2110 */
2111 if (va == (vaddr_t)vm_map_max(&p->p_vmspace->vm_map)) {
2112 va = (vaddr_t)p->p_vmspace->vm_taddr - round_page(sz);
2113 }
2114 #endif
2115
2116 (*uobj->pgops->pgo_reference)(uobj);
2117 error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz),
2118 uobj, 0, 0,
2119 UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
2120 UVM_ADV_RANDOM, 0));
2121 if (error) {
2122 DPRINTF(("%s, %d: map %p "
2123 "uvm_map %#"PRIxVSIZE"@%#"PRIxVADDR" failed %d\n",
2124 __func__, __LINE__, &p->p_vmspace->vm_map, round_page(sz),
2125 va, error));
2126 (*uobj->pgops->pgo_detach)(uobj);
2127 return error;
2128 }
2129 p->p_sigctx.ps_sigcode = (void *)va;
2130 return 0;
2131 }
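
/*
 * Illustrative sketch (editor's addition): the mapping established by
 * exec_sigcode_map() is the in-kernel equivalent of a shared, executable
 * userland mapping of the sigobject, roughly:
 */
#if 0
	/* "sigobject_fd" is hypothetical; the kernel maps the uobj directly */
	void *sigcode = mmap(hint, round_page(sz), PROT_READ|PROT_EXEC,
	    MAP_SHARED, sigobject_fd, 0);
#endif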
2132
2133 /*
2134 * Release a reference on the spawn_exec_data and free the memory
2135 * if this was the last one.
2136 */
2137 static void
2138 spawn_exec_data_release(struct spawn_exec_data *data)
2139 {
2140
2141 membar_release();
2142 if (atomic_dec_32_nv(&data->sed_refcnt) != 0)
2143 return;
2144 membar_acquire();
2145
2146 cv_destroy(&data->sed_cv_child_ready);
2147 mutex_destroy(&data->sed_mtx_child);
2148
2149 if (data->sed_actions)
2150 posix_spawn_fa_free(data->sed_actions,
2151 data->sed_actions->len);
2152 if (data->sed_attrs)
2153 kmem_free(data->sed_attrs,
2154 sizeof(*data->sed_attrs));
2155 kmem_free(data, sizeof(*data));
2156 }
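
/*
 * Illustrative sketch (editor's addition): the matching reference
 * acquisition is a plain increment with no memory barrier; the
 * membar_release()/membar_acquire() pair above ensures that the thread
 * that frees the structure observes all writes made while other
 * references were live.  "sed_hold" is a hypothetical helper shown only
 * to document the idiom (in this file the count is simply assigned
 * before the child is started).
 */
#if 0
static void
sed_hold(struct spawn_exec_data *data)
{

	atomic_inc_32(&data->sed_refcnt);
}
#endif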
2157
2158 static int
2159 handle_posix_spawn_file_actions(struct posix_spawn_file_actions *actions)
2160 {
2161 struct lwp *l = curlwp;
2162 register_t retval;
2163 int error = 0, newfd;	/* error initialized for unknown fae_action */
2164
2165 if (actions == NULL)
2166 return 0;
2167
2168 for (size_t i = 0; i < actions->len; i++) {
2169 const struct posix_spawn_file_actions_entry *fae =
2170 &actions->fae[i];
2171 switch (fae->fae_action) {
2172 case FAE_OPEN:
2173 if (fd_getfile(fae->fae_fildes) != NULL) {
2174 error = fd_close(fae->fae_fildes);
2175 if (error)
2176 return error;
2177 }
2178 error = fd_open(fae->fae_path, fae->fae_oflag,
2179 fae->fae_mode, &newfd);
2180 if (error)
2181 return error;
2182 if (newfd != fae->fae_fildes) {
2183 error = dodup(l, newfd,
2184 fae->fae_fildes, 0, &retval);
2185 if (fd_getfile(newfd) != NULL)
2186 fd_close(newfd);
2187 }
2188 break;
2189 case FAE_DUP2:
2190 error = dodup(l, fae->fae_fildes,
2191 fae->fae_newfildes, 0, &retval);
2192 break;
2193 case FAE_CLOSE:
2194 if (fd_getfile(fae->fae_fildes) == NULL) {
2195 return SET_ERROR(EBADF);
2196 }
2197 error = fd_close(fae->fae_fildes);
2198 break;
2199 case FAE_CHDIR:
2200 error = do_sys_chdir(l, fae->fae_chdir_path,
2201 UIO_SYSSPACE, &retval);
2202 break;
2203 case FAE_FCHDIR:
2204 error = do_sys_fchdir(l, fae->fae_fildes, &retval);
2205 break;
2206 }
2207 if (error)
2208 return error;
2209 }
2210 return 0;
2211 }
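
/*
 * Illustrative sketch (editor's addition): the userland counterpart that
 * produces the FAE_* records handled above, e.g. redirecting stdout to a
 * file and closing stdin before the spawn.  Error handling is omitted
 * for brevity.
 */
#if 0
	#include <spawn.h>
	#include <fcntl.h>

	posix_spawn_file_actions_t fa;

	posix_spawn_file_actions_init(&fa);
	/* becomes FAE_OPEN */
	posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO,
	    "/tmp/out", O_WRONLY|O_CREAT|O_TRUNC, 0644);
	/* becomes FAE_DUP2 */
	posix_spawn_file_actions_adddup2(&fa, STDOUT_FILENO, STDERR_FILENO);
	/* becomes FAE_CLOSE */
	posix_spawn_file_actions_addclose(&fa, STDIN_FILENO);
	/* ...pass &fa to posix_spawn(), then _destroy() it */
#endif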
2212
2213 static int
2214 handle_posix_spawn_attrs(struct posix_spawnattr *attrs, struct proc *parent)
2215 {
2216 struct sigaction sigact;
2217 int error = 0;	/* in case no attribute flags are set */
2218 struct proc *p = curproc;
2219 struct lwp *l = curlwp;
2220
2221 if (attrs == NULL)
2222 return 0;
2223
2224 memset(&sigact, 0, sizeof(sigact));
2225 sigact._sa_u._sa_handler = SIG_DFL;
2226 sigact.sa_flags = 0;
2227
2228 /*
2229 * Set state to SSTOP so that this proc can be found by pid.
2230 * See proc_enterpgrp() and do_sched_setparam() below.
2231 */
2232 mutex_enter(&proc_lock);
2233 /*
2234 * p_stat should be SACTIVE, so we need to adjust the
2235 * parent's p_nstopchild here. For safety, just make sure
2236 * we're on the good side of SDEAD before we adjust.
2237 */
2238 int ostat = p->p_stat;
2239 KASSERT(ostat < SSTOP);
2240 p->p_stat = SSTOP;
2241 p->p_waited = 0;
2242 p->p_pptr->p_nstopchild++;
2243 mutex_exit(&proc_lock);
2244
2245 /* Set process group */
2246 if (attrs->sa_flags & POSIX_SPAWN_SETPGROUP) {
2247 pid_t mypid = p->p_pid;
2248 pid_t pgrp = attrs->sa_pgroup;
2249
2250 if (pgrp == 0)
2251 pgrp = mypid;
2252
2253 error = proc_enterpgrp(parent, mypid, pgrp, false);
2254 if (error)
2255 goto out;
2256 }
2257
2258 /* Set scheduler policy */
2259 if (attrs->sa_flags & POSIX_SPAWN_SETSCHEDULER)
2260 error = do_sched_setparam(p->p_pid, 0, attrs->sa_schedpolicy,
2261 &attrs->sa_schedparam);
2262 else if (attrs->sa_flags & POSIX_SPAWN_SETSCHEDPARAM) {
2263 error = do_sched_setparam(parent->p_pid, 0,
2264 SCHED_NONE, &attrs->sa_schedparam);
2265 }
2266 if (error)
2267 goto out;
2268
2269 /* Reset user ID's */
2270 if (attrs->sa_flags & POSIX_SPAWN_RESETIDS) {
2271 error = do_setresgid(l, -1, kauth_cred_getgid(l->l_cred), -1,
2272 ID_E_EQ_R | ID_E_EQ_S);
2273 if (error)
2274 goto out;	/* must restore p_stat and p_nstopchild */
2275 error = do_setresuid(l, -1, kauth_cred_getuid(l->l_cred), -1,
2276 ID_E_EQ_R | ID_E_EQ_S);
2277 if (error)
2278 goto out;
2279 }
2280
2281 /* Set signal masks/defaults */
2282 if (attrs->sa_flags & POSIX_SPAWN_SETSIGMASK) {
2283 mutex_enter(p->p_lock);
2284 error = sigprocmask1(l, SIG_SETMASK, &attrs->sa_sigmask, NULL);
2285 mutex_exit(p->p_lock);
2286 if (error)
2287 goto out;
2288 }
2289
2290 if (attrs->sa_flags & POSIX_SPAWN_SETSIGDEF) {
2291 /*
2292 * The following sigaction call is using a sigaction
2293 * version 0 trampoline which is in the compatibility
2294 * code only. This is not a problem because for SIG_DFL
2295 * and SIG_IGN, the trampolines are now ignored. If they
2296 * were not, this would be a problem because we are
2297 * holding the exec_lock, and the compat code needs
2298 * to do the same in order to replace the trampoline
2299 * code of the process.
2300 */
2301 for (int i = 1; i <= NSIG; i++) {
2302 if (sigismember(&attrs->sa_sigdefault, i))
2303 sigaction1(l, i, &sigact, NULL, NULL, 0);
2304 }
2305 }
2306 error = 0;
2307 out:
2308 mutex_enter(&proc_lock);
2309 p->p_stat = ostat;
2310 p->p_pptr->p_nstopchild--;
2311 mutex_exit(&proc_lock);
2312 return error;
2313 }
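
/*
 * Illustrative sketch (editor's addition): the userland side that fills
 * in the posix_spawnattr handled above, e.g. giving the child an empty
 * signal mask and its own process group.  Error handling is omitted for
 * brevity.
 */
#if 0
	#include <spawn.h>
	#include <signal.h>

	posix_spawnattr_t sa;
	sigset_t mask;

	posix_spawnattr_init(&sa);
	sigemptyset(&mask);
	posix_spawnattr_setsigmask(&sa, &mask);	/* POSIX_SPAWN_SETSIGMASK */
	posix_spawnattr_setpgroup(&sa, 0);	/* POSIX_SPAWN_SETPGROUP */
	posix_spawnattr_setflags(&sa,
	    POSIX_SPAWN_SETSIGMASK | POSIX_SPAWN_SETPGROUP);
	/* ...pass &sa to posix_spawn(), then _destroy() it */
#endif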
2314
2315 /*
2316 * A child lwp of a posix_spawn operation starts here and ends up in
2317 * cpu_spawn_return, dealing with all file descriptor and scheduler
2318 * manipulations in between.
2319 * The parent waits for the child, as it is not clear whether the child
2320 * will be able to acquire its own exec_lock. If it can, the parent can
2321 * be released early and continue running in parallel. If not (or if the
2322 * magic debug flag is passed in the scheduler attribute struct), the
2323 * child rides on the parent's exec lock until it is ready to return
2324 * to userland - and only then releases the parent. This method loses
2325 * concurrency, but improves error reporting.
2326 */
2327 static void
2328 spawn_return(void *arg)
2329 {
2330 struct spawn_exec_data *spawn_data = arg;
2331 struct lwp *l = curlwp;
2332 struct proc *p = l->l_proc;
2333 int error;
2334 bool have_reflock;
2335 bool parent_is_waiting = true;
2336
2337 /*
2338 * Check if we can release parent early.
2339 * We either need to have no sed_attrs, or the sed_attrs must not
2340 * include POSIX_SPAWN_RETURNERROR or any of the flags that require
2341 * safe access to the parent proc (passed in sed_parent).
2342 * We then try to get the exec_lock, and only if that works, we can
2343 * release the parent here already.
2344 */
2345 struct posix_spawnattr *attrs = spawn_data->sed_attrs;
2346 if ((!attrs || (attrs->sa_flags
2347 & (POSIX_SPAWN_RETURNERROR|POSIX_SPAWN_SETPGROUP)) == 0)
2348 && rw_tryenter(&exec_lock, RW_READER)) {
2349 parent_is_waiting = false;
2350 mutex_enter(&spawn_data->sed_mtx_child);
2351 KASSERT(!spawn_data->sed_child_ready);
2352 spawn_data->sed_error = 0;
2353 spawn_data->sed_child_ready = true;
2354 cv_signal(&spawn_data->sed_cv_child_ready);
2355 mutex_exit(&spawn_data->sed_mtx_child);
2356 }
2357
2358 /* don't allow debugger access yet */
2359 rw_enter(&p->p_reflock, RW_WRITER);
2360 have_reflock = true;
2361
2362 /* handle posix_spawnattr */
2363 error = handle_posix_spawn_attrs(attrs, spawn_data->sed_parent);
2364 if (error)
2365 goto report_error;
2366
2367 /* handle posix_spawn_file_actions */
2368 error = handle_posix_spawn_file_actions(spawn_data->sed_actions);
2369 if (error)
2370 goto report_error;
2371
2372 /* now do the real exec */
2373 error = execve_runproc(l, &spawn_data->sed_exec, parent_is_waiting,
2374 true);
2375 have_reflock = false;
2376 if (error == EJUSTRETURN)
2377 error = 0;
2378 else if (error)
2379 goto report_error;
2380
2381 if (parent_is_waiting) {
2382 mutex_enter(&spawn_data->sed_mtx_child);
2383 KASSERT(!spawn_data->sed_child_ready);
2384 spawn_data->sed_error = 0;
2385 spawn_data->sed_child_ready = true;
2386 cv_signal(&spawn_data->sed_cv_child_ready);
2387 mutex_exit(&spawn_data->sed_mtx_child);
2388 }
2389
2390 /* release our refcount on the data */
2391 spawn_exec_data_release(spawn_data);
2392
2393 if ((p->p_slflag & (PSL_TRACED|PSL_TRACEDCHILD)) ==
2394 (PSL_TRACED|PSL_TRACEDCHILD)) {
2395 eventswitchchild(p, TRAP_CHLD, PTRACE_POSIX_SPAWN);
2396 }
2397
2398 /* and finally: leave to userland for the first time */
2399 cpu_spawn_return(l);
2400
2401 /* NOTREACHED */
2402 return;
2403
2404 report_error:
2405 if (have_reflock) {
2406 /*
2407 * We have not passed through execve_runproc(),
2408 * which would have released the p_reflock and also
2409 * taken ownership of the sed_exec part of spawn_data,
2410 * so release/free both here.
2411 */
2412 rw_exit(&p->p_reflock);
2413 execve_free_data(&spawn_data->sed_exec);
2414 }
2415
2416 if (parent_is_waiting) {
2417 /* pass error to parent */
2418 mutex_enter(&spawn_data->sed_mtx_child);
2419 KASSERT(!spawn_data->sed_child_ready);
2420 spawn_data->sed_error = error;
2421 spawn_data->sed_child_ready = true;
2422 cv_signal(&spawn_data->sed_cv_child_ready);
2423 mutex_exit(&spawn_data->sed_mtx_child);
2424 } else {
2425 rw_exit(&exec_lock);
2426 }
2427
2428 /* release our refcount on the data */
2429 spawn_exec_data_release(spawn_data);
2430
2431 /* done, exit */
2432 mutex_enter(p->p_lock);
2433 /*
2434 * POSIX explicitly asks for an exit code of 127 if we report
2435 * errors from the child process - so, unfortunately, there
2436 * is no way to report a more exact error code.
2437 * A NetBSD-specific workaround is the POSIX_SPAWN_RETURNERROR
2438 * flag bit in the attrp argument to posix_spawn(2); see above.
2439 */
2440 exit1(l, 127, 0);
2441 }
2442
2443 static __inline char **
2444 posix_spawn_fae_path(struct posix_spawn_file_actions_entry *fae)
2445 {
2446 switch (fae->fae_action) {
2447 case FAE_OPEN:
2448 return &fae->fae_path;
2449 case FAE_CHDIR:
2450 return &fae->fae_chdir_path;
2451 default:
2452 return NULL;
2453 }
2454 }
2455
2456 void
2457 posix_spawn_fa_free(struct posix_spawn_file_actions *fa, size_t len)
2458 {
2459
2460 for (size_t i = 0; i < len; i++) {
2461 char **pathp = posix_spawn_fae_path(&fa->fae[i]);
2462 if (pathp)
2463 kmem_strfree(*pathp);
2464 }
2465 if (fa->len > 0)
2466 kmem_free(fa->fae, sizeof(*fa->fae) * fa->len);
2467 kmem_free(fa, sizeof(*fa));
2468 }
2469
2470 static int
2471 posix_spawn_fa_alloc(struct posix_spawn_file_actions **fap,
2472 const struct posix_spawn_file_actions *ufa, rlim_t lim)
2473 {
2474 struct posix_spawn_file_actions *fa;
2475 struct posix_spawn_file_actions_entry *fae;
2476 char *pbuf = NULL;
2477 int error;
2478 size_t i = 0;
2479
2480 fa = kmem_alloc(sizeof(*fa), KM_SLEEP);
2481 error = copyin(ufa, fa, sizeof(*fa));
2482 if (error || fa->len == 0) {
2483 kmem_free(fa, sizeof(*fa));
2484 return error; /* 0 if not an error, and len == 0 */
2485 }
2486
2487 if (fa->len > lim) {
2488 kmem_free(fa, sizeof(*fa));
2489 return SET_ERROR(EINVAL);
2490 }
2491
2492 fa->size = fa->len;
2493 size_t fal = fa->len * sizeof(*fae);
2494 fae = fa->fae;
2495 fa->fae = kmem_alloc(fal, KM_SLEEP);
2496 error = copyin(fae, fa->fae, fal);
2497 if (error)
2498 goto out;
2499
2500 pbuf = PNBUF_GET();
2501 for (; i < fa->len; i++) {
2502 char **pathp = posix_spawn_fae_path(&fa->fae[i]);
2503 if (pathp == NULL)
2504 continue;
2505 error = copyinstr(*pathp, pbuf, MAXPATHLEN, &fal);
2506 if (error)
2507 goto out;
2508 *pathp = kmem_alloc(fal, KM_SLEEP);
2509 memcpy(*pathp, pbuf, fal);
2510 }
2511 PNBUF_PUT(pbuf);
2512
2513 *fap = fa;
2514 return 0;
2515 out:
2516 if (pbuf)
2517 PNBUF_PUT(pbuf);
2518 posix_spawn_fa_free(fa, i);
2519 return error;
2520 }
2521
2522 /*
2523 * N.B. increments nprocs upon success. Callers need to drop nprocs if
2524 * they fail for some other reason.
2525 */
2526 int
2527 check_posix_spawn(struct lwp *l1)
2528 {
2529 int error, tnprocs, count;
2530 uid_t uid;
2531 struct proc *p1;
2532
2533 p1 = l1->l_proc;
2534 uid = kauth_cred_getuid(l1->l_cred);
2535 tnprocs = atomic_inc_uint_nv(&nprocs);
2536
2537 /*
2538 * Although process entries are dynamically created, we still keep
2539 * a global limit on the maximum number we will create.
2540 */
2541 if (__predict_false(tnprocs >= maxproc))
2542 error = -1;
2543 else
2544 error = kauth_authorize_process(l1->l_cred,
2545 KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);
2546
2547 if (error) {
2548 atomic_dec_uint(&nprocs);
2549 return SET_ERROR(EAGAIN);
2550 }
2551
2552 /*
2553 * Enforce limits.
2554 */
2555 count = chgproccnt(uid, 1);
2556 if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
2557 p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
2558 &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0 &&
2559 __predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
2560 (void)chgproccnt(uid, -1);
2561 atomic_dec_uint(&nprocs);
2562 return SET_ERROR(EAGAIN);
2563 }
2564
2565 return 0;
2566 }
2567
2568 int
2569 do_posix_spawn(struct lwp *l1, pid_t *pid_res, bool *child_ok, const char *path,
2570 struct posix_spawn_file_actions *fa,
2571 struct posix_spawnattr *sa,
2572 char *const *argv, char *const *envp,
2573 execve_fetch_element_t fetch)
2574 {
2575
2576 struct proc *p1, *p2;
2577 struct lwp *l2;
2578 int error;
2579 struct spawn_exec_data *spawn_data;
2580 vaddr_t uaddr = 0;
2581 pid_t pid;
2582 bool have_exec_lock = false;
2583
2584 p1 = l1->l_proc;
2585
2586 /* Allocate and init spawn_data */
2587 spawn_data = kmem_zalloc(sizeof(*spawn_data), KM_SLEEP);
2588 spawn_data->sed_refcnt = 1; /* only parent so far */
2589 cv_init(&spawn_data->sed_cv_child_ready, "pspawn");
2590 mutex_init(&spawn_data->sed_mtx_child, MUTEX_DEFAULT, IPL_NONE);
2591 mutex_enter(&spawn_data->sed_mtx_child);
2592
2593 /*
2594 * Do the first part of the exec now, collect state
2595 * in spawn_data.
2596 */
2597 error = execve_loadvm(l1, true, path, -1, argv,
2598 envp, fetch, &spawn_data->sed_exec);
2599 if (error == EJUSTRETURN)
2600 error = 0;
2601 else if (error)
2602 goto error_exit;
2603
2604 have_exec_lock = true;
2605
2606 /*
2607 * Allocate virtual address space for the U-area now, while it
2608 * is still easy to abort the fork operation if we're out of
2609 * kernel virtual address space.
2610 */
2611 uaddr = uvm_uarea_alloc();
2612 if (__predict_false(uaddr == 0)) {
2613 error = SET_ERROR(ENOMEM);
2614 goto error_exit;
2615 }
2616
2617 /*
2618 * Allocate new proc. Borrow proc0 vmspace for it, we will
2619 * replace it with its own before returning to userland
2620 * in the child.
2621 */
2622 p2 = proc_alloc();
2623 if (p2 == NULL) {
2624 /* We were unable to allocate a process ID. */
2625 error = SET_ERROR(EAGAIN);
2626 goto error_exit;
2627 }
2628
2629 /*
2630 * This is a point of no return; past this point we have to go
2631 * through the child proc to clean it up properly.
2632 */
2633 pid = p2->p_pid;
2634
2635 /*
2636 * Make a proc table entry for the new process.
2637 * Start by zeroing the section of proc that is zero-initialized,
2638 * then copy the section that is copied directly from the parent.
2639 */
2640 memset(&p2->p_startzero, 0,
2641 (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
2642 memcpy(&p2->p_startcopy, &p1->p_startcopy,
2643 (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));
2644 p2->p_vmspace = proc0.p_vmspace;
2645
2646 TAILQ_INIT(&p2->p_sigpend.sp_info);
2647
2648 LIST_INIT(&p2->p_lwps);
2649 LIST_INIT(&p2->p_sigwaiters);
2650
2651 /*
2652 * Duplicate sub-structures as needed.
2653 * Increase reference counts on shared objects.
2654 * Inherit flags we want to keep. The flags related to SIGCHLD
2655 * handling are important in order to keep a consistent behaviour
2656 * for the child after the fork. If we are a 32-bit process, the
2657 * child will be too.
2658 */
2659 p2->p_flag =
2660 p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
2661 p2->p_emul = p1->p_emul;
2662 p2->p_execsw = p1->p_execsw;
2663
2664 mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
2665 mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
2666 rw_init(&p2->p_reflock);
2667 cv_init(&p2->p_waitcv, "wait");
2668 cv_init(&p2->p_lwpcv, "lwpwait");
2669
2670 p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
2671
2672 kauth_proc_fork(p1, p2);
2673
2674 p2->p_raslist = NULL;
2675 p2->p_fd = fd_copy();
2676
2677 /* XXX racy */
2678 p2->p_mqueue_cnt = p1->p_mqueue_cnt;
2679
2680 p2->p_cwdi = cwdinit();
2681
2682 /*
2683 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
2684 * we just need to increase pl_refcnt.
2685 */
2686 if (!p1->p_limit->pl_writeable) {
2687 lim_addref(p1->p_limit);
2688 p2->p_limit = p1->p_limit;
2689 } else {
2690 p2->p_limit = lim_copy(p1->p_limit);
2691 }
2692
2693 p2->p_lflag = 0;
2694 l1->l_vforkwaiting = false;
2695 p2->p_sflag = 0;
2696 p2->p_slflag = 0;
2697 p2->p_pptr = p1;
2698 p2->p_ppid = p1->p_pid;
2699 LIST_INIT(&p2->p_children);
2700
2701 p2->p_aio = NULL;
2702
2703 #ifdef KTRACE
2704 /*
2705 * Copy traceflag and tracefile if enabled.
2706 * If not inherited, these were zeroed above.
2707 */
2708 if (p1->p_traceflag & KTRFAC_INHERIT) {
2709 mutex_enter(&ktrace_lock);
2710 p2->p_traceflag = p1->p_traceflag;
2711 if ((p2->p_tracep = p1->p_tracep) != NULL)
2712 ktradref(p2);
2713 mutex_exit(&ktrace_lock);
2714 }
2715 #endif
2716
2717 /*
2718 * Create signal actions for the child process.
2719 */
2720 p2->p_sigacts = sigactsinit(p1, 0);
2721 mutex_enter(p1->p_lock);
2722 p2->p_sflag |=
2723 (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
2724 sched_proc_fork(p1, p2);
2725 mutex_exit(p1->p_lock);
2726
2727 p2->p_stflag = p1->p_stflag;
2728
2729 /*
2730 * p_stats.
2731 * Copy parts of p_stats, and zero out the rest.
2732 */
2733 p2->p_stats = pstatscopy(p1->p_stats);
2734
2735 /* copy over machdep flags to the new proc */
2736 cpu_proc_fork(p1, p2);
2737
2738 /*
2739 * Prepare remaining parts of spawn data
2740 */
2741 spawn_data->sed_actions = fa;
2742 spawn_data->sed_attrs = sa;
2743
2744 spawn_data->sed_parent = p1;
2745
2746 /* create LWP */
2747 lwp_create(l1, p2, uaddr, 0, NULL, 0, spawn_return, spawn_data,
2748 &l2, l1->l_class, &l1->l_sigmask, &l1->l_sigstk);
2749 l2->l_ctxlink = NULL; /* reset ucontext link */
2750
2751 /*
2752 * Copy the credential so other references don't see our changes.
2753 * Test to see if this is necessary first, since in the common case
2754 * we won't need a private reference.
2755 */
2756 if (kauth_cred_geteuid(l2->l_cred) != kauth_cred_getsvuid(l2->l_cred) ||
2757 kauth_cred_getegid(l2->l_cred) != kauth_cred_getsvgid(l2->l_cred)) {
2758 l2->l_cred = kauth_cred_copy(l2->l_cred);
2759 kauth_cred_setsvuid(l2->l_cred, kauth_cred_geteuid(l2->l_cred));
2760 kauth_cred_setsvgid(l2->l_cred, kauth_cred_getegid(l2->l_cred));
2761 }
2762
2763 /* Update the master credentials. */
2764 if (l2->l_cred != p2->p_cred) {
2765 kauth_cred_t ocred;
2766 mutex_enter(p2->p_lock);
2767 ocred = p2->p_cred;
2768 p2->p_cred = kauth_cred_hold(l2->l_cred);
2769 mutex_exit(p2->p_lock);
2770 kauth_cred_free(ocred);
2771 }
2772
2773 *child_ok = true;
2774 spawn_data->sed_refcnt = 2; /* child gets it as well */
2775 #if 0
2776 l2->l_nopreempt = 1; /* start it non-preemptable */
2777 #endif
2778
2779 /*
2780 * It's now safe for the scheduler and other processes to see the
2781 * child process.
2782 */
2783 mutex_enter(&proc_lock);
2784
2785 if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
2786 p2->p_lflag |= PL_CONTROLT;
2787
2788 LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
2789 p2->p_exitsig = SIGCHLD; /* signal for parent on exit */
2790
2791 if ((p1->p_slflag & (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) ==
2792 (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) {
2793 proc_changeparent(p2, p1->p_pptr);
2794 SET(p2->p_slflag, PSL_TRACEDCHILD);
2795 }
2796
2797 p2->p_oppid = p1->p_pid; /* Remember the original parent id. */
2798
2799 LIST_INSERT_AFTER(p1, p2, p_pglist);
2800 LIST_INSERT_HEAD(&allproc, p2, p_list);
2801
2802 p2->p_trace_enabled = trace_is_enabled(p2);
2803 #ifdef __HAVE_SYSCALL_INTERN
2804 (*p2->p_emul->e_syscall_intern)(p2);
2805 #endif
2806
2807 /*
2808 * Make the child runnable, set its start time, and add it to the run
2809 * queue, unless the parent requested the child to start in SSTOP state.
2810 */
2811 mutex_enter(p2->p_lock);
2812
2813 getmicrotime(&p2->p_stats->p_start);
2814
2815 lwp_lock(l2);
2816 KASSERT(p2->p_nrlwps == 1);
2817 KASSERT(l2->l_stat == LSIDL);
2818 p2->p_nrlwps = 1;
2819 p2->p_stat = SACTIVE;
2820 setrunnable(l2);
2821 /* LWP now unlocked */
2822
2823 mutex_exit(p2->p_lock);
2824 mutex_exit(&proc_lock);
2825
2826 while (!spawn_data->sed_child_ready) {
2827 cv_wait(&spawn_data->sed_cv_child_ready,
2828 &spawn_data->sed_mtx_child);
2829 }
2830 error = spawn_data->sed_error;
2831 mutex_exit(&spawn_data->sed_mtx_child);
2832 spawn_exec_data_release(spawn_data);
2833
2834 rw_exit(&p1->p_reflock);
2835 rw_exit(&exec_lock);
2836 have_exec_lock = false;
2837
2838 *pid_res = pid;
2839
2840 if (error)
2841 return error;
2842
2843 if (p1->p_slflag & PSL_TRACED) {
2844 /* Paranoid check */
2845 mutex_enter(&proc_lock);
2846 if ((p1->p_slflag & (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) !=
2847 (PSL_TRACEPOSIX_SPAWN|PSL_TRACED)) {
2848 mutex_exit(&proc_lock);
2849 return 0;
2850 }
2851
2852 mutex_enter(p1->p_lock);
2853 eventswitch(TRAP_CHLD, PTRACE_POSIX_SPAWN, pid);
2854 }
2855 return 0;
2856
2857 error_exit:
2858 if (have_exec_lock) {
2859 execve_free_data(&spawn_data->sed_exec);
2860 rw_exit(&p1->p_reflock);
2861 rw_exit(&exec_lock);
2862 }
2863 mutex_exit(&spawn_data->sed_mtx_child);
2864 spawn_exec_data_release(spawn_data);
2865 if (uaddr != 0)
2866 uvm_uarea_free(uaddr);
2867
2868 return error;
2869 }
2870
2871 int
2872 sys_posix_spawn(struct lwp *l1, const struct sys_posix_spawn_args *uap,
2873 register_t *retval)
2874 {
2875 /* {
2876 syscallarg(pid_t *) pid;
2877 syscallarg(const char *) path;
2878 syscallarg(const struct posix_spawn_file_actions *) file_actions;
2879 syscallarg(const struct posix_spawnattr *) attrp;
2880 syscallarg(char *const *) argv;
2881 syscallarg(char *const *) envp;
2882 } */
2883
2884 int error;
2885 struct posix_spawn_file_actions *fa = NULL;
2886 struct posix_spawnattr *sa = NULL;
2887 pid_t pid;
2888 bool child_ok = false;
2889 rlim_t max_fileactions;
2890 proc_t *p = l1->l_proc;
2891
2892 /* check_posix_spawn() increments nprocs for us. */
2893 error = check_posix_spawn(l1);
2894 if (error) {
2895 *retval = error;
2896 return 0;
2897 }
2898
2899 /* copy in file_actions struct */
2900 if (SCARG(uap, file_actions) != NULL) {
2901 max_fileactions = 2 * uimin(p->p_rlimit[RLIMIT_NOFILE].rlim_cur,
2902 maxfiles);
2903 error = posix_spawn_fa_alloc(&fa, SCARG(uap, file_actions),
2904 max_fileactions);
2905 if (error)
2906 goto error_exit;
2907 }
2908
2909 /* copyin posix_spawnattr struct */
2910 if (SCARG(uap, attrp) != NULL) {
2911 sa = kmem_alloc(sizeof(*sa), KM_SLEEP);
2912 error = copyin(SCARG(uap, attrp), sa, sizeof(*sa));
2913 if (error)
2914 goto error_exit;
2915 }
2916
2917 /*
2918 * Do the spawn
2919 */
2920 error = do_posix_spawn(l1, &pid, &child_ok, SCARG(uap, path), fa, sa,
2921 SCARG(uap, argv), SCARG(uap, envp), execve_fetch_element);
2922 if (error)
2923 goto error_exit;
2924
2925 if (SCARG(uap, pid) != NULL)
2926 error = copyout(&pid, SCARG(uap, pid), sizeof(pid));
2927
2928 *retval = error;
2929 return 0;
2930
2931 error_exit:
2932 if (!child_ok) {
2933 (void)chgproccnt(kauth_cred_getuid(l1->l_cred), -1);
2934 atomic_dec_uint(&nprocs);
2935
2936 if (sa)
2937 kmem_free(sa, sizeof(*sa));
2938 if (fa)
2939 posix_spawn_fa_free(fa, fa->len);
2940 }
2941
2942 *retval = error;
2943 return 0;
2944 }
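
/*
 * Illustrative sketch (editor's addition): minimal userland use of the
 * syscall above.  Note that posix_spawn(3) returns the error number
 * directly (via *retval here), not -1 with errno.
 */
#if 0
	#include <spawn.h>

	extern char **environ;

	pid_t pid;
	char *args[] = { __UNCONST("echo"), __UNCONST("hello"), NULL };
	int err;

	err = posix_spawn(&pid, "/bin/echo", NULL, NULL, args, environ);
	if (err == 0)
		/* pid now holds the child's process id */;
#endif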
2945
2946 void
2947 exec_free_emul_arg(struct exec_package *epp)
2948 {
2949 if (epp->ep_emul_arg_free != NULL) {
2950 KASSERT(epp->ep_emul_arg != NULL);
2951 (*epp->ep_emul_arg_free)(epp->ep_emul_arg);
2952 epp->ep_emul_arg_free = NULL;
2953 epp->ep_emul_arg = NULL;
2954 } else {
2955 KASSERT(epp->ep_emul_arg == NULL);
2956 }
2957 }
2958
2959 #ifdef DEBUG_EXEC
2960 static void
2961 dump_vmcmds(const struct exec_package * const epp, size_t x, int error)
2962 {
2963 struct exec_vmcmd *vp = &epp->ep_vmcmds.evs_cmds[0];
2964 size_t j;
2965
2966 if (error == 0)
2967 DPRINTF(("vmcmds %u\n", epp->ep_vmcmds.evs_used));
2968 else
2969 DPRINTF(("vmcmds %zu/%u, error %d\n", x,
2970 epp->ep_vmcmds.evs_used, error));
2971
2972 for (j = 0; j < epp->ep_vmcmds.evs_used; j++) {
2973 DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#"
2974 PRIxVADDR"/%#"PRIxVSIZE" fd@%#"
2975 PRIxVSIZE" prot=0%o flags=%d\n", j,
2976 vp[j].ev_proc == vmcmd_map_pagedvn ?
2977 "pagedvn" :
2978 vp[j].ev_proc == vmcmd_map_readvn ?
2979 "readvn" :
2980 vp[j].ev_proc == vmcmd_map_zero ?
2981 "zero" : "*unknown*",
2982 vp[j].ev_addr, vp[j].ev_len,
2983 vp[j].ev_offset, vp[j].ev_prot,
2984 vp[j].ev_flags));
2985 if (error != 0 && j == x)
2986 DPRINTF((" ^--- failed\n"));
2987 }
2988 }
2989 #endif
2990