/*	$NetBSD: kern_exec.c,v 1.331 2011/11/24 17:09:14 christos Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.331 2011/11/24 17:09:14 christos Exp $");

#include "opt_exec.h"
#include "opt_ktrace.h"
#include "opt_modular.h"
#include "opt_syscall_debug.h"
#include "veriexec.h"
#include "opt_pax.h"
#include "opt_sa.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/uidinfo.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/ras.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/kauth.h>
#include <sys/lwpctl.h>
#include <sys/pax.h>
#include <sys/cpu.h>
#include <sys/module.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallvar.h>
#include <sys/syscallargs.h>
#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */
#include <sys/sdt.h>
#include <sys/cprng.h>

#include <uvm/uvm_extern.h>

#include <machine/reg.h>

#include <compat/common/compat_util.h>

static int exec_sigcode_map(struct proc *, const struct emul *);

#ifdef DEBUG_EXEC
#define DPRINTF(a) printf a
#define COPYPRINTF(s, a, b) printf("%s, %d: copyout%s @%p %zu\n", __func__, \
    __LINE__, (s), (a), (b))
#else
#define DPRINTF(a)
#define COPYPRINTF(s, a, b)
#endif /* DEBUG_EXEC */

/*
 * DTrace SDT provider definitions
 */
SDT_PROBE_DEFINE(proc,,,exec,
	    "char *", NULL,
	    NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,exec_success,
	    "char *", NULL,
	    NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL);
SDT_PROBE_DEFINE(proc,,,exec_failure,
	    "int", NULL,
	    NULL, NULL, NULL, NULL,
	    NULL, NULL, NULL, NULL);

/*
 * Exec function switch:
 *
 * Note that each makecmds function is responsible for loading the
 * exec package with the necessary functions for any exec-type-specific
 * handling.
 *
 * Functions for specific exec types should be defined in their own
 * header file.
 */
static const struct execsw **execsw = NULL;
static int nexecs;

u_int exec_maxhdrsz;		/* must not be static - used by netbsd32 */

/* list of dynamically loaded execsw entries */
static LIST_HEAD(execlist_head, exec_entry) ex_head =
    LIST_HEAD_INITIALIZER(ex_head);
struct exec_entry {
	LIST_ENTRY(exec_entry)	ex_list;
	SLIST_ENTRY(exec_entry)	ex_slist;
	const struct execsw	*ex_sw;
};
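/*
 * Note: ex_list links every registered entry; ex_slist is used only
 * transiently, by exec_init() below, to sort entries by priority.
 */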

#ifndef __HAVE_SYSCALL_INTERN
void	syscall(void);
#endif

#ifdef KERN_SA
static struct sa_emul saemul_netbsd = {
	sizeof(ucontext_t),
	sizeof(struct sa_t),
	sizeof(struct sa_t *),
	NULL,
	NULL,
	cpu_upcall,
	(void (*)(struct lwp *, void *))getucontext_sa,
	sa_ucsp
};
#endif /* KERN_SA */

/* NetBSD emul struct */
struct emul emul_netbsd = {
	.e_name =		"netbsd",
	.e_path =		NULL,
#ifndef __HAVE_MINIMAL_EMUL
	.e_flags =		EMUL_HAS_SYS___syscall,
	.e_errno =		NULL,
	.e_nosys =		SYS_syscall,
	.e_nsysent =		SYS_NSYSENT,
#endif
	.e_sysent =		sysent,
#ifdef SYSCALL_DEBUG
	.e_syscallnames =	syscallnames,
#else
	.e_syscallnames =	NULL,
#endif
	.e_sendsig =		sendsig,
	.e_trapsignal =		trapsignal,
	.e_tracesig =		NULL,
	.e_sigcode =		NULL,
	.e_esigcode =		NULL,
	.e_sigobject =		NULL,
	.e_setregs =		setregs,
	.e_proc_exec =		NULL,
	.e_proc_fork =		NULL,
	.e_proc_exit =		NULL,
	.e_lwp_fork =		NULL,
	.e_lwp_exit =		NULL,
#ifdef __HAVE_SYSCALL_INTERN
	.e_syscall_intern =	syscall_intern,
#else
	.e_syscall =		syscall,
#endif
	.e_sysctlovly =		NULL,
	.e_fault =		NULL,
	.e_vm_default_addr =	uvm_default_mapaddr,
	.e_usertrap =		NULL,
#ifdef KERN_SA
	.e_sa =			&saemul_netbsd,
#else
	.e_sa =			NULL,
#endif
	.e_ucsize =		sizeof(ucontext_t),
	.e_startlwp =		startlwp
};

/*
 * Exec lock. Used to control access to execsw[] structures.
 * This must not be static so that netbsd32 can access it, too.
 */
krwlock_t exec_lock;

static kmutex_t sigobject_lock;

static void *
exec_pool_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, NCARGS, 0,
	    UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
}

static void
exec_pool_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE);
}

static struct pool exec_pool;

static struct pool_allocator exec_palloc = {
	.pa_alloc = exec_pool_alloc,
	.pa_free = exec_pool_free,
	.pa_pagesz = NCARGS
};
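/*
 * Each pool item is a full NCARGS-sized, pageable buffer; execve1()
 * below grabs one item to stage the new image's argument and
 * environment strings before they are copied out to the new user stack.
 */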

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	lwp pointer of exec'ing lwp
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, executable's vnode (unlocked).
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, executable's vnode (unlocked).
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, executable's vnode (unlocked),
 *			exec header unmodified.
 */
int
/*ARGSUSED*/
check_exec(struct lwp *l, struct exec_package *epp, struct pathbuf *pb)
{
	int		error, i;
	struct vnode	*vp;
	struct nameidata nd;
	size_t		resid;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);

	/* first get the vnode */
	if ((error = namei(&nd)) != 0)
		return error;
	epp->ep_vp = vp = nd.ni_vp;
	/* this cannot overflow as both are size PATH_MAX */
	strcpy(epp->ep_resolvedname, nd.ni_pnbuf);

#ifdef DIAGNOSTIC
	/* paranoia (take this out once namei stuff stabilizes) */
	memset(nd.ni_pnbuf, '~', PATH_MAX);
#endif

	/* check access and type */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto bad1;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0)
		goto bad1;

	/* unlock vp, since we need it unlocked from here on out. */
	VOP_UNLOCK(vp);

#if NVERIEXEC > 0
	error = veriexec_verify(l, vp, epp->ep_resolvedname,
	    epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT,
	    NULL);
	if (error)
		goto bad2;
#endif /* NVERIEXEC > 0 */

#ifdef PAX_SEGVGUARD
	error = pax_segvguard(l, vp, epp->ep_resolvedname, false);
	if (error)
		goto bad2;
#endif /* PAX_SEGVGUARD */

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, l->l_cred, &resid, NULL);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * Set up default address space limits.  Can be overridden
	 * by individual exec packages.
	 *
	 * XXX probably should be all done in the exec packages.
	 */
	epp->ep_vm_minaddr = VM_MIN_ADDRESS;
	epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS;
	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs; i++) {
		int newerror;

		epp->ep_esch = execsw[i];
		newerror = (*execsw[i]->es_makecmds)(l, epp);

		if (!newerror) {
			/* Seems ok: check that entry point is not too high */
			if (epp->ep_entry > epp->ep_vm_maxaddr) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting %p due to "
				    "too high entry address (> %p)\n",
				    __func__, (void *)epp->ep_entry,
				    (void *)epp->ep_vm_maxaddr);
#endif
				error = ENOEXEC;
				break;
			}
			/* Seems ok: check that entry point is not too low */
			if (epp->ep_entry < epp->ep_vm_minaddr) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting %p due to "
				    "too low entry address (< %p)\n",
				    __func__, (void *)epp->ep_entry,
				    (void *)epp->ep_vm_minaddr);
#endif
				error = ENOEXEC;
				break;
			}

			/* check limits */
			if ((epp->ep_tsize > MAXTSIZ) ||
			    (epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit
			    [RLIMIT_DATA].rlim_cur)) {
#ifdef DIAGNOSTIC
				printf("%s: rejecting due to "
				    "limits (t=%llu > %llu || d=%llu > %llu)\n",
				    __func__,
				    (unsigned long long)epp->ep_tsize,
				    (unsigned long long)MAXTSIZ,
				    (unsigned long long)epp->ep_dsize,
				    (unsigned long long)
				    l->l_proc->p_rlimit[RLIMIT_DATA].rlim_cur);
#endif
				error = ENOMEM;
				break;
			}
			return 0;
		}

		if (epp->ep_emul_root != NULL) {
			vrele(epp->ep_emul_root);
			epp->ep_emul_root = NULL;
		}
		if (epp->ep_interp != NULL) {
			vrele(epp->ep_interp);
			epp->ep_interp = NULL;
		}

		/* make sure the first "interesting" error code is saved. */
		if (error == ENOEXEC)
			error = newerror;

		if (epp->ep_flags & EXEC_DESTR)
			/* Error from "#!" code, tidied up by recursive call */
			return error;
	}

	/* not found, error */

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close and release the vnode, restore the old one, free the
	 * pathname buf, and punt.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(vp, FREAD, l->l_cred);
	vput(vp);
	return error;

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	vput(vp);			/* was still locked */
	return error;
}

#ifdef __MACHINE_STACK_GROWS_UP
#define STACK_PTHREADSPACE NBPG
#else
#define STACK_PTHREADSPACE 0
#endif

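/*
 * Fetch one pointer from a user-space argv/envp array. execve1() takes
 * this as a function pointer so that compat code (e.g. netbsd32) can
 * substitute a fetch routine for its own user pointer size.
 */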
static int
execve_fetch_element(char * const *array, size_t index, char **value)
{
	return copyin(array + index, value, sizeof(*value));
}

/*
 * exec system call
 */
/* ARGSUSED */
int
sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *)	path;
		syscallarg(char * const *)	argp;
		syscallarg(char * const *)	envp;
	} */

	return execve1(l, SCARG(uap, path), SCARG(uap, argp),
	    SCARG(uap, envp), execve_fetch_element);
}

int
sys_fexecve(struct lwp *l, const struct sys_fexecve_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			fd;
		syscallarg(char * const *)	argp;
		syscallarg(char * const *)	envp;
	} */

	return ENOSYS;
}

/*
 * Load modules to try and execute an image that we do not understand.
 * If no execsw entries are present, we load those likely to be needed
 * in order to run native images only.  Otherwise, we autoload all
 * possible modules that could let us run the binary.  XXX lame
 */
static void
exec_autoload(void)
{
#ifdef MODULAR
	static const char * const native[] = {
		"exec_elf32",
		"exec_elf64",
		"exec_script",
		NULL
	};
	static const char * const compat[] = {
		"exec_elf32",
		"exec_elf64",
		"exec_script",
		"exec_aout",
		"exec_coff",
		"exec_ecoff",
		"compat_aoutm68k",
		"compat_freebsd",
		"compat_ibcs2",
		"compat_linux",
		"compat_linux32",
		"compat_netbsd32",
		"compat_sunos",
		"compat_sunos32",
		"compat_svr4",
		"compat_svr4_32",
		"compat_ultrix",
		NULL
	};
	char const * const *list;
	int i;

	list = (nexecs == 0 ? native : compat);
	for (i = 0; list[i] != NULL; i++) {
		if (module_autoload(list[i], MODULE_CLASS_MISC) != 0) {
			continue;
		}
		yield();
	}
#endif
}

int
execve1(struct lwp *l, const char *path, char * const *args,
    char * const *envs, execve_fetch_element_t fetch_element)
{
	int			error;
	struct exec_package	pack;
	struct pathbuf		*pb;
	struct vattr		attr;
	struct proc		*p;
	char			*argp;
	char			*dp, *sp;
	long			argc, envc;
	size_t			i, len;
	char			*stack;
	struct ps_strings	arginfo;
	struct ps_strings32	arginfo32;
	void			*aip;
	struct vmspace		*vm;
	struct exec_fakearg	*tmpfap;
	int			szsigcode;
	struct exec_vmcmd	*base_vcp;
	int			oldlwpflags;
	ksiginfo_t		ksi;
	ksiginfoq_t		kq;
	const char		*pathstring;
	char			*resolvedpathbuf;
	const char		*commandname;
	u_int			modgen;
	size_t			ps_strings_sz;

	p = l->l_proc;
	modgen = 0;

	SDT_PROBE(proc,,,exec, path, 0, 0, 0, 0);

	/*
	 * Check if we have exceeded our number of processes limit.
	 * This is so that we handle the case where a root daemon
	 * forked, ran setuid to become the desired user and is trying
	 * to exec. The obvious place to do the reference counting check
	 * is setuid(), but we don't do the reference counting check there
	 * like other OS's do because then all the programs that use setuid()
	 * must be modified to check the return code of setuid() and exit().
	 * It is dangerous to make setuid() fail, because it fails open and
	 * the program will continue to run as root. If we make it succeed
	 * and return an error code, again we are not enforcing the limit.
	 * The best place to enforce the limit is here, when the process tries
	 * to execute a new image, because eventually the process will need
	 * to call exec in order to do something useful.
	 */
retry:
	if ((p->p_flag & PK_SUGID) && kauth_authorize_generic(l->l_cred,
	    KAUTH_GENERIC_ISSUSER, NULL) != 0 && chgproccnt(kauth_cred_getuid(
	    l->l_cred), 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur)
		return EAGAIN;

	oldlwpflags = l->l_flag & (LW_SA | LW_SA_UPCALL);
	if (l->l_flag & LW_SA) {
		lwp_lock(l);
		l->l_flag &= ~(LW_SA | LW_SA_UPCALL);
		lwp_unlock(l);
	}

	/*
	 * Drain existing references and forbid new ones.  The process
	 * should be left alone until we're done here.  This is necessary
	 * to avoid race conditions - e.g. in ptrace() - that might allow
	 * a local user to illicitly obtain elevated privileges.
	 */
	rw_enter(&p->p_reflock, RW_WRITER);

	base_vcp = NULL;
	/*
	 * Init the namei data to point at the user's program name.
	 * This is done here rather than in check_exec(), so that it's
	 * possible to override these settings if any of the makecmd/probe
	 * functions call check_exec() recursively - for example,
	 * see exec_script_makecmds().
	 */
	error = pathbuf_copyin(path, &pb);
	if (error) {
		DPRINTF(("%s: pathbuf_copyin path @%p %d\n", __func__,
		    path, error));
		goto clrflg;
	}
	pathstring = pathbuf_stringcopy_get(pb);
	resolvedpathbuf = PNBUF_GET();
#ifdef DIAGNOSTIC
	strcpy(resolvedpathbuf, "/wrong");
#endif

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = path;
	pack.ep_kname = pathstring;
	pack.ep_resolvedname = resolvedpathbuf;
	pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_emul_arg = NULL;
	pack.ep_vmcmds.evs_cnt = 0;
	pack.ep_vmcmds.evs_used = 0;
	pack.ep_vap = &attr;
	pack.ep_flags = 0;
	pack.ep_emul_root = NULL;
	pack.ep_interp = NULL;
	pack.ep_esch = NULL;
	pack.ep_pax_flags = 0;

	rw_enter(&exec_lock, RW_READER);

	/* see if we can run it. */
	if ((error = check_exec(l, &pack, pb)) != 0) {
		if (error != ENOENT) {
			DPRINTF(("%s: check exec failed %d\n",
			    __func__, error));
		}
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = pool_get(&exec_pool, PR_WAITOK);
	KASSERT(argp != NULL);
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (tmpfap->fa_arg != NULL) {
			const char *cp;

			cp = tmpfap->fa_arg;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';
			ktrexecarg(tmpfap->fa_arg, cp - tmpfap->fa_arg);

			kmem_free(tmpfap->fa_arg, tmpfap->fa_len);
			tmpfap++; argc++;
		}
		kmem_free(pack.ep_fa, pack.ep_fa_len);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (args == NULL) {
		DPRINTF(("%s: null args\n", __func__));
		error = EINVAL;
		goto bad;
	}
	/* 'i' will index the argp/envp element to be retrieved */
	i = 0;
	if (pack.ep_flags & EXEC_SKIPARG)
		i++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = (*fetch_element)(args, i, &sp)) != 0) {
			DPRINTF(("%s: fetch_element args %d\n",
			    __func__, error));
			goto bad;
		}
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			DPRINTF(("%s: copyinstr args %d\n", __func__, error));
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		ktrexecarg(dp, len - 1);
		dp += len;
		i++;
		argc++;
	}

	envc = 0;
	/* environment need not be there */
	if (envs != NULL) {
		i = 0;
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = (*fetch_element)(envs, i, &sp)) != 0) {
				DPRINTF(("%s: fetch_element env %d\n",
				    __func__, error));
				goto bad;
			}
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				DPRINTF(("%s: copyinstr env %d\n",
				    __func__, error));
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			ktrexecenv(dp, len - 1);
			dp += len;
			i++;
			envc++;
		}
	}

	dp = (char *)ALIGN(dp);

	szsigcode = pack.ep_esch->es_emul->e_esigcode -
	    pack.ep_esch->es_emul->e_sigcode;

#ifdef __MACHINE_STACK_GROWS_UP
/* See big comment lower down */
#define	RTLD_GAP	32
#else
#define	RTLD_GAP	0
#endif

	/* Now check if args & environ fit into new stack */
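	/*
	 * The length computed below accounts for: the argc + envc string
	 * pointers plus their two NULL terminators and the emulation's
	 * es_arglen extra pointer-sized slots, the argc word itself, the
	 * argument/environment strings copied in above (dp - argp), the
	 * RTLD gap, the signal trampoline, the ps_strings block, and the
	 * pthread stack-space reservation.
	 */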
	if (pack.ep_flags & EXEC_32) {
		aip = &arginfo32;
		ps_strings_sz = sizeof(struct ps_strings32);
		len = ((argc + envc + 2 + pack.ep_esch->es_arglen) *
		    sizeof(int) + sizeof(int) + dp + RTLD_GAP +
		    szsigcode + ps_strings_sz + STACK_PTHREADSPACE)
		    - argp;
	} else {
		aip = &arginfo;
		ps_strings_sz = sizeof(struct ps_strings);
		len = ((argc + envc + 2 + pack.ep_esch->es_arglen) *
		    sizeof(char *) + sizeof(int) + dp + RTLD_GAP +
		    szsigcode + ps_strings_sz + STACK_PTHREADSPACE)
		    - argp;
	}

#ifdef PAX_ASLR
	if (pax_aslr_active(l))
		len += (cprng_fast32() % PAGE_SIZE);
#endif /* PAX_ASLR */

#ifdef STACKALIGN	/* arm, etc. */
	len = STACKALIGN(len);	/* make the stack "safely" aligned */
#else
	len = ALIGN(len);	/* make the stack "safely" aligned */
#endif

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		DPRINTF(("%s: stack limit exceeded %zu\n", __func__, len));
		error = ENOMEM;
		goto bad;
	}

	/* Get rid of other LWPs. */
	if (p->p_sa || p->p_nlwps > 1) {
		mutex_enter(p->p_lock);
		exit_lwps(l);
		mutex_exit(p->p_lock);
	}
	KDASSERT(p->p_nlwps == 1);

	/* Destroy any lwpctl info. */
	if (p->p_lwpctl != NULL)
		lwp_ctl_exit();

#ifdef KERN_SA
	/* Release any SA state. */
	if (p->p_sa)
		sa_release(p);
#endif /* KERN_SA */

	/* Remove POSIX timers */
	timers_free(p, TIMERS_POSIX);

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * Do whatever is necessary to prepare the address space
	 * for remapping.  Note that this might replace the current
	 * vmspace with another!
	 */
	uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr);

	/* record proc's vnode, for use by procfs and others */
	if (p->p_textvp)
		vrele(p->p_textvp);
	vref(pack.ep_vp);
	p->p_textvp = pack.ep_vp;

	/* Now map address space */
	vm = p->p_vmspace;
	vm->vm_taddr = (void *)pack.ep_taddr;
	vm->vm_tsize = btoc(pack.ep_tsize);
	vm->vm_daddr = (void *)pack.ep_daddr;
	vm->vm_dsize = btoc(pack.ep_dsize);
	vm->vm_ssize = btoc(pack.ep_ssize);
	vm->vm_issize = 0;
	vm->vm_maxsaddr = (void *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (void *)pack.ep_minsaddr;

#ifdef PAX_ASLR
	pax_aslr_init(l, vm);
#endif /* PAX_ASLR */

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("%s: no vmcmds", __func__);
#endif

#ifdef DEBUG_EXEC
	{
		size_t j;
		struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0];
		DPRINTF(("vmcmds %u\n", pack.ep_vmcmds.evs_used));
		for (j = 0; j < pack.ep_vmcmds.evs_used; j++) {
			DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#"
			    PRIxVADDR"/%#"PRIxVSIZE" fd@%#"
			    PRIxVSIZE" prot=0%o flags=%d\n", j,
			    vp[j].ev_proc == vmcmd_map_pagedvn ?
			    "pagedvn" :
			    vp[j].ev_proc == vmcmd_map_readvn ?
			    "readvn" :
			    vp[j].ev_proc == vmcmd_map_zero ?
			    "zero" : "*unknown*",
			    vp[j].ev_addr, vp[j].ev_len,
			    vp[j].ev_offset, vp[j].ev_prot,
			    vp[j].ev_flags));
		}
	}
#endif	/* DEBUG_EXEC */

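	/*
	 * Run the vmcmds in order. A command flagged VMCMD_BASE records
	 * its address as the base for any subsequent VMCMD_RELATIVE
	 * commands, whose addresses are offsets from that base (used,
	 * e.g., when an ELF interpreter is mapped at a kernel-chosen
	 * address).
	 */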
	for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &pack.ep_vmcmds.evs_cmds[i];
		if (vcp->ev_flags & VMCMD_RELATIVE) {
#ifdef DIAGNOSTIC
			if (base_vcp == NULL)
				panic("%s: relative vmcmd with no base",
				    __func__);
			if (vcp->ev_flags & VMCMD_BASE)
				panic("%s: illegal base & relative vmcmd",
				    __func__);
#endif
			vcp->ev_addr += base_vcp->ev_addr;
		}
		error = (*vcp->ev_proc)(l, vcp);
#ifdef DEBUG_EXEC
		if (error) {
			size_t j;
			struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0];
			DPRINTF(("vmcmds %zu/%u, error %d\n", i,
			    pack.ep_vmcmds.evs_used, error));
			for (j = 0; j < pack.ep_vmcmds.evs_used; j++) {
				DPRINTF(("vmcmd[%zu] = vmcmd_map_%s %#"
				    PRIxVADDR"/%#"PRIxVSIZE" fd@%#"
				    PRIxVSIZE" prot=0%o flags=%d\n", j,
				    vp[j].ev_proc == vmcmd_map_pagedvn ?
				    "pagedvn" :
				    vp[j].ev_proc == vmcmd_map_readvn ?
				    "readvn" :
				    vp[j].ev_proc == vmcmd_map_zero ?
				    "zero" : "*unknown*",
				    vp[j].ev_addr, vp[j].ev_len,
				    vp[j].ev_offset, vp[j].ev_prot,
				    vp[j].ev_flags));
				if (j == i)
					DPRINTF(("     ^--- failed\n"));
			}
		}
#endif	/* DEBUG_EXEC */
		if (vcp->ev_flags & VMCMD_BASE)
			base_vcp = vcp;
	}

	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);

	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred);
	vput(pack.ep_vp);

	/* if an error happened, deallocate and punt */
	if (error) {
		DPRINTF(("%s: vmcmd %zu failed: %d\n", __func__, i - 1, error));
		goto exec_abort;
	}

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

	/* set command name & other accounting info */
	commandname = strrchr(pack.ep_resolvedname, '/');
	if (commandname != NULL) {
		commandname++;
	} else {
		commandname = pack.ep_resolvedname;
	}
	i = min(strlen(commandname), MAXCOMLEN);
	(void)memcpy(p->p_comm, commandname, i);
	p->p_comm[i] = '\0';

	dp = PNBUF_GET();
	/*
	 * If the path starts with /, we don't need to do any work.
	 * This handles the majority of the cases.
	 * In the future perhaps we could canonicalize it?
	 */
	if (pathstring[0] == '/')
		(void)strlcpy(pack.ep_path = dp, pathstring, MAXPATHLEN);
#ifdef notyet
	/*
	 * Although this works most of the time [since the entry was just
	 * entered in the cache] we don't use it because it theoretically
	 * can fail and it is not the cleanest interface, because there
	 * could be races. When the namei cache is re-written, this can
	 * be changed to use the appropriate function.
	 */
	else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p)))
		pack.ep_path = dp;
#endif
	else {
#ifdef notyet
		printf("Cannot get path for pid %d [%s] (error %d)",
		    (int)p->p_pid, p->p_comm, error);
#endif
		pack.ep_path = NULL;
		PNBUF_PUT(dp);
	}

	stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr,
	    STACK_PTHREADSPACE + ps_strings_sz + szsigcode),
	    len - (ps_strings_sz + szsigcode));

#ifdef __MACHINE_STACK_GROWS_UP
	/*
	 * The copyargs call always copies into lower addresses
	 * first, moving towards higher addresses, starting with
	 * the stack pointer that we give.  When the stack grows
	 * down, this puts argc/argv/envp very shallow on the
	 * stack, right at the first user stack pointer.
	 * When the stack grows up, the situation is reversed.
	 *
	 * Normally, this is no big deal.  But the ld_elf.so _rtld()
	 * function expects to be called with a single pointer to
	 * a region that has a few words it can stash values into,
	 * followed by argc/argv/envp.  When the stack grows down,
	 * it's easy to decrement the stack pointer a little bit to
	 * allocate the space for these few words and pass the new
	 * stack pointer to _rtld.  When the stack grows up, however,
	 * a few words before argc are part of the signal trampoline, XXX
	 * so we have a problem.
	 *
	 * Instead of changing how _rtld works, we take the easy way
	 * out and steal 32 bytes before we call copyargs.
	 * This extra space was allowed for when 'len' was calculated.
	 */
	stack += RTLD_GAP;
#endif /* __MACHINE_STACK_GROWS_UP */

	/* Now copy argc, args & environ to new stack */
	error = (*pack.ep_esch->es_copyargs)(l, &pack, &arginfo, &stack, argp);
	if (pack.ep_path) {
		PNBUF_PUT(pack.ep_path);
		pack.ep_path = NULL;
	}
	if (error) {
		DPRINTF(("%s: copyargs failed %d\n", __func__, error));
		goto exec_abort;
	}
	/* Move the stack back to original point */
	stack = (char *)STACK_GROW(vm->vm_minsaddr, len);

	/* fill process ps_strings info */
	p->p_psstrp = (vaddr_t)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr,
	    STACK_PTHREADSPACE), ps_strings_sz);

	if (pack.ep_flags & EXEC_32) {
		arginfo32.ps_argvstr = (vaddr_t)arginfo.ps_argvstr;
		arginfo32.ps_nargvstr = arginfo.ps_nargvstr;
		arginfo32.ps_envstr = (vaddr_t)arginfo.ps_envstr;
		arginfo32.ps_nenvstr = arginfo.ps_nenvstr;
	}

	/* copy out the process's ps_strings structure */
	if ((error = copyout(aip, (void *)p->p_psstrp, ps_strings_sz)) != 0) {
		DPRINTF(("%s: ps_strings copyout %p->%p size %zu failed\n",
		    __func__, aip, (void *)p->p_psstrp, ps_strings_sz));
		goto exec_abort;
	}

	cwdexec(p);
	fd_closeexec();		/* handle close on exec */

	if (__predict_false(ktrace_on))
		fd_ktrexecfd();

	execsigs(p);		/* reset caught signals */

	l->l_ctxlink = NULL;	/* reset ucontext link */


	p->p_acflag &= ~AFORK;
	mutex_enter(p->p_lock);
	p->p_flag |= PK_EXEC;
	mutex_exit(p->p_lock);

	/*
	 * Stop profiling.
	 */
	if ((p->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}

	/*
	 * It's OK to test PL_PPWAIT unlocked here, as other LWPs have
	 * exited and exec()/exit() are the only places it will be cleared.
	 */
	if ((p->p_lflag & PL_PPWAIT) != 0) {
		mutex_enter(proc_lock);
		l->l_lwpctl = NULL; /* was on loan from blocked parent */
		p->p_lflag &= ~PL_PPWAIT;
		cv_broadcast(&p->p_pptr->p_waitcv);
		mutex_exit(proc_lock);
	}

	/*
	 * Deal with set[ug]id.  MNT_NOSUID has already been used to disable
	 * s[ug]id.  It's OK to check for PSL_TRACED here as we have blocked
	 * out additional references on the process for the moment.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 &&

	    (((attr.va_mode & S_ISUID) != 0 &&
	      kauth_cred_geteuid(l->l_cred) != attr.va_uid) ||

	     ((attr.va_mode & S_ISGID) != 0 &&
	      kauth_cred_getegid(l->l_cred) != attr.va_gid))) {
		/*
		 * Mark the process as SUGID before we do
		 * anything that might block.
		 */
		proc_crmod_enter();
		proc_crmod_leave(NULL, NULL, true);

		/* Make sure file descriptors 0..2 are in use. */
		if ((error = fd_checkstd()) != 0) {
			DPRINTF(("%s: fdcheckstd failed %d\n",
			    __func__, error));
			goto exec_abort;
		}

		/*
		 * Copy the credential so other references don't see our
		 * changes.
		 */
		l->l_cred = kauth_cred_copy(l->l_cred);
#ifdef KTRACE
		/*
		 * If the persistent trace flag isn't set, turn off tracing.
		 */
		if (p->p_tracep) {
			mutex_enter(&ktrace_lock);
			if (!(p->p_traceflag & KTRFAC_PERSISTENT))
				ktrderef(p);
			mutex_exit(&ktrace_lock);
		}
#endif
		if (attr.va_mode & S_ISUID)
			kauth_cred_seteuid(l->l_cred, attr.va_uid);
		if (attr.va_mode & S_ISGID)
			kauth_cred_setegid(l->l_cred, attr.va_gid);
	} else {
		if (kauth_cred_geteuid(l->l_cred) ==
		    kauth_cred_getuid(l->l_cred) &&
		    kauth_cred_getegid(l->l_cred) ==
		    kauth_cred_getgid(l->l_cred))
			p->p_flag &= ~PK_SUGID;
	}

	/*
	 * Copy the credential so other references don't see our changes.
	 * Test to see if this is necessary first, since in the common case
	 * we won't need a private reference.
	 */
	if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) ||
	    kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) {
		l->l_cred = kauth_cred_copy(l->l_cred);
		kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred));
		kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred));
	}

	/* Update the master credentials. */
	if (l->l_cred != p->p_cred) {
		kauth_cred_t ocred;

		kauth_cred_hold(l->l_cred);
		mutex_enter(p->p_lock);
		ocred = p->p_cred;
		p->p_cred = l->l_cred;
		mutex_exit(p->p_lock);
		kauth_cred_free(ocred);
	}

#if defined(__HAVE_RAS)
	/*
	 * Remove all RASs from the address space.
	 */
	ras_purgeall();
#endif

	doexechooks(p);

	/* setup new registers and do misc. setup. */
	(*pack.ep_esch->es_emul->e_setregs)(l, &pack, (vaddr_t)stack);
	if (pack.ep_esch->es_setregs)
		(*pack.ep_esch->es_setregs)(l, &pack, (vaddr_t)stack);

	/* Provide a consistent LWP private setting */
	(void)lwp_setprivate(l, NULL);

	/* Discard all PCU state; need to start fresh */
	pcu_discard_all(l);

	/* map the process's signal trampoline code */
	if ((error = exec_sigcode_map(p, pack.ep_esch->es_emul)) != 0) {
		DPRINTF(("%s: map sigcode failed %d\n", __func__, error));
		goto exec_abort;
	}

	pool_put(&exec_pool, argp);

	/* notify others that we exec'd */
	KNOTE(&p->p_klist, NOTE_EXEC);

	kmem_free(pack.ep_hdr, pack.ep_hdrlen);

	SDT_PROBE(proc,,,exec_success, path, 0, 0, 0, 0);

	/* The emulation root will usually have been found when we looked
	 * for the elf interpreter (or similar); if not, look now. */
	if (pack.ep_esch->es_emul->e_path != NULL && pack.ep_emul_root == NULL)
		emul_find_root(l, &pack);

	/* Any old emulation root got removed by fd_closeexec() */
	rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER);
	p->p_cwdi->cwdi_edir = pack.ep_emul_root;
	rw_exit(&p->p_cwdi->cwdi_lock);
	pack.ep_emul_root = NULL;
	if (pack.ep_interp != NULL)
		vrele(pack.ep_interp);

	/*
	 * Call the emulation-specific exec hook. This can set up per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing a process of a different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of the new emulation. If the emulation
	 * is the same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (p->p_emul && p->p_emul->e_proc_exit
	    && p->p_emul != pack.ep_esch->es_emul)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * This is now LWP 1.
	 */
	mutex_enter(p->p_lock);
	p->p_nlwpid = 1;
	l->l_lid = 1;
	mutex_exit(p->p_lock);

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_esch->es_emul->e_proc_exec)
		(*pack.ep_esch->es_emul->e_proc_exec)(p, &pack);

	/* update p_emul, the old value is no longer needed */
	p->p_emul = pack.ep_esch->es_emul;

	/* ...and the same for p_execsw */
	p->p_execsw = pack.ep_esch;

#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif
	ktremul();

	/* Allow new references from the debugger/procfs. */
	rw_exit(&p->p_reflock);
	rw_exit(&exec_lock);

	mutex_enter(proc_lock);

	if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) {
		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l->l_lid;
		kpsignal(p, &ksi, NULL);
	}

	if (p->p_sflag & PS_STOPEXEC) {
		KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
		p->p_pptr->p_nstopchild++;
		p->p_pptr->p_waited = 0;
		mutex_enter(p->p_lock);
		ksiginfo_queue_init(&kq);
		sigclearall(p, &contsigmask, &kq);
		lwp_lock(l);
		l->l_stat = LSSTOP;
		p->p_stat = SSTOP;
		p->p_nrlwps--;
		lwp_unlock(l);
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		lwp_lock(l);
		mi_switch(l);
		ksiginfo_queue_drain(&kq);
		KERNEL_LOCK(l->l_biglocks, l);
	} else {
		mutex_exit(proc_lock);
	}

	pathbuf_stringcopy_put(pb, pathstring);
	pathbuf_destroy(pb);
	PNBUF_PUT(resolvedpathbuf);
	DPRINTF(("%s finished\n", __func__));
	return (EJUSTRETURN);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fd_close(pack.ep_fd);
	}
	/* close and put the exec'd file */
	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred);
	vput(pack.ep_vp);
	pool_put(&exec_pool, argp);

freehdr:
	kmem_free(pack.ep_hdr, pack.ep_hdrlen);
	if (pack.ep_emul_root != NULL)
		vrele(pack.ep_emul_root);
	if (pack.ep_interp != NULL)
		vrele(pack.ep_interp);

	rw_exit(&exec_lock);

	pathbuf_stringcopy_put(pb, pathstring);
	pathbuf_destroy(pb);
	PNBUF_PUT(resolvedpathbuf);

clrflg:
	lwp_lock(l);
	l->l_flag |= oldlwpflags;
	lwp_unlock(l);
	rw_exit(&p->p_reflock);

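	/*
	 * If the image was rejected with ENOEXEC and the set of loaded
	 * modules has changed since we started, autoload any exec/compat
	 * modules that might understand it and retry the whole exec.
	 */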
	if (modgen != module_gen && error == ENOEXEC) {
		modgen = module_gen;
		exec_autoload();
		goto retry;
	}

	SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0);
	return error;

exec_abort:
	SDT_PROBE(proc,,,exec_failure, error, 0, 0, 0, 0);
	rw_exit(&p->p_reflock);
	rw_exit(&exec_lock);

	pathbuf_stringcopy_put(pb, pathstring);
	pathbuf_destroy(pb);
	PNBUF_PUT(resolvedpathbuf);

	/*
	 * The old process no longer exists, so exit gracefully.
	 * Get rid of the (new) address space we have created, if any,
	 * get rid of our namei data and vnode, and exit noting failure.
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_emul_arg)
		free(pack.ep_emul_arg, M_TEMP);
	pool_put(&exec_pool, argp);
	kmem_free(pack.ep_hdr, pack.ep_hdrlen);
	if (pack.ep_emul_root != NULL)
		vrele(pack.ep_emul_root);
	if (pack.ep_interp != NULL)
		vrele(pack.ep_interp);

	/* Acquire the sched-state mutex (exit1() will release it). */
	mutex_enter(p->p_lock);
	exit1(l, W_EXITCODE(error, SIGABRT));

	/* NOTREACHED */
	return 0;
}

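/*
 * Lay out the exec arguments on the new stack, from low to high
 * addresses: argc, the argv[] pointers, a terminating NULL, the envp[]
 * pointers, another NULL, then es_arglen pointer-sized slots, and
 * finally the strings the pointers refer to. On return *stackp points
 * at the es_arglen area, which callers such as elf_copyargs() fill in
 * (e.g. with the ELF auxiliary vector).
 */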
int
copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo,
    char **stackp, void *argp)
{
	char	**cpp, *dp, *sp;
	size_t	len;
	void	*nullp;
	long	argc, envc;
	int	error;

	cpp = (char **)*stackp;
	nullp = NULL;
	argc = arginfo->ps_nargvstr;
	envc = arginfo->ps_nenvstr;
	if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(argc));
		return error;
	}

	dp = (char *)(cpp + argc + envc + 2 + pack->ep_esch->es_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len) {
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) {
			COPYPRINTF("", cpp - 1, sizeof(dp));
			return error;
		}
		if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) {
			COPYPRINTF("str", dp, (size_t)ARG_MAX);
			return error;
		}
	}

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(nullp));
		return error;
	}

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len) {
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0) {
			COPYPRINTF("", cpp - 1, sizeof(dp));
			return error;
		}
		if ((error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0) {
			COPYPRINTF("str", dp, (size_t)ARG_MAX);
			return error;
		}
	}

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0) {
		COPYPRINTF("", cpp - 1, sizeof(nullp));
		return error;
	}

	*stackp = (char *)cpp;
	return 0;
}


/*
 * Add execsw[] entries.
 */
int
exec_add(struct execsw *esp, int count)
{
	struct exec_entry	*it;
	int			i;

	if (count == 0) {
		return 0;
	}

	/* Check for duplicates. */
	rw_enter(&exec_lock, RW_WRITER);
	for (i = 0; i < count; i++) {
		LIST_FOREACH(it, &ex_head, ex_list) {
			/* assume unique (makecmds, probe_func, emulation) */
			if (it->ex_sw->es_makecmds == esp[i].es_makecmds &&
			    it->ex_sw->u.elf_probe_func ==
			    esp[i].u.elf_probe_func &&
			    it->ex_sw->es_emul == esp[i].es_emul) {
				rw_exit(&exec_lock);
				return EEXIST;
			}
		}
	}

	/* Allocate new entries. */
	for (i = 0; i < count; i++) {
		it = kmem_alloc(sizeof(*it), KM_SLEEP);
		it->ex_sw = &esp[i];
		LIST_INSERT_HEAD(&ex_head, it, ex_list);
	}

	/* update execsw[] */
	exec_init(0);
	rw_exit(&exec_lock);
	return 0;
}

/*
 * Remove execsw[] entry.
 */
int
exec_remove(struct execsw *esp, int count)
{
	struct exec_entry	*it, *next;
	int			i;
	const struct proclist_desc *pd;
	proc_t			*p;

	if (count == 0) {
		return 0;
	}

	/* Abort if any are busy. */
	rw_enter(&exec_lock, RW_WRITER);
	for (i = 0; i < count; i++) {
		mutex_enter(proc_lock);
		for (pd = proclists; pd->pd_list != NULL; pd++) {
			PROCLIST_FOREACH(p, pd->pd_list) {
				if (p->p_execsw == &esp[i]) {
					mutex_exit(proc_lock);
					rw_exit(&exec_lock);
					return EBUSY;
				}
			}
		}
		mutex_exit(proc_lock);
	}

	/* None are busy, so remove them all. */
	for (i = 0; i < count; i++) {
		for (it = LIST_FIRST(&ex_head); it != NULL; it = next) {
			next = LIST_NEXT(it, ex_list);
			if (it->ex_sw == &esp[i]) {
				LIST_REMOVE(it, ex_list);
				kmem_free(it, sizeof(*it));
				break;
			}
		}
	}

	/* update execsw[] */
	exec_init(0);
	rw_exit(&exec_lock);
	return 0;
}

/*
 * Initialize exec structures. If init_boot is true, also does necessary
 * one-time initialization (it's called from main() that way).
 * Once the system is multiuser, this should be called with exec_lock
 * held, i.e. via exec_{add|remove}().
 */
int
exec_init(int init_boot)
{
	const struct execsw	**sw;
	struct exec_entry	*ex;
	SLIST_HEAD(,exec_entry) first;
	SLIST_HEAD(,exec_entry) any;
	SLIST_HEAD(,exec_entry) last;
	int			i, sz;

	if (init_boot) {
		/* do one-time initializations */
		rw_init(&exec_lock);
		mutex_init(&sigobject_lock, MUTEX_DEFAULT, IPL_NONE);
		pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH,
		    "execargs", &exec_palloc, IPL_NONE);
		pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0);
	} else {
		KASSERT(rw_write_held(&exec_lock));
	}

	/* Sort each entry onto the appropriate queue. */
	SLIST_INIT(&first);
	SLIST_INIT(&any);
	SLIST_INIT(&last);
	sz = 0;
	LIST_FOREACH(ex, &ex_head, ex_list) {
		switch (ex->ex_sw->es_prio) {
		case EXECSW_PRIO_FIRST:
			SLIST_INSERT_HEAD(&first, ex, ex_slist);
			break;
		case EXECSW_PRIO_ANY:
			SLIST_INSERT_HEAD(&any, ex, ex_slist);
			break;
		case EXECSW_PRIO_LAST:
			SLIST_INSERT_HEAD(&last, ex, ex_slist);
			break;
		default:
			panic("%s", __func__);
			break;
		}
		sz++;
	}

	/*
	 * Create new execsw[].  Ensure we do not try a zero-sized
	 * allocation.
	 */
	sw = kmem_alloc(sz * sizeof(struct execsw *) + 1, KM_SLEEP);
	i = 0;
	SLIST_FOREACH(ex, &first, ex_slist) {
		sw[i++] = ex->ex_sw;
	}
	SLIST_FOREACH(ex, &any, ex_slist) {
		sw[i++] = ex->ex_sw;
	}
	SLIST_FOREACH(ex, &last, ex_slist) {
		sw[i++] = ex->ex_sw;
	}

	/* Replace old execsw[] and free used memory. */
	if (execsw != NULL) {
		kmem_free(__UNCONST(execsw),
		    nexecs * sizeof(struct execsw *) + 1);
	}
	execsw = sw;
	nexecs = sz;

	/* Figure out the maximum size of an exec header. */
	exec_maxhdrsz = sizeof(int);
	for (i = 0; i < nexecs; i++) {
		if (execsw[i]->es_hdrsz > exec_maxhdrsz)
			exec_maxhdrsz = execsw[i]->es_hdrsz;
	}

	return 0;
}

static int
exec_sigcode_map(struct proc *p, const struct emul *e)
{
	vaddr_t va;
	vsize_t sz;
	int error;
	struct uvm_object *uobj;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	if (e->e_sigobject == NULL || sz == 0) {
		return 0;
	}

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * We map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap() would map it.
	 */

	uobj = *e->e_sigobject;
	if (uobj == NULL) {
		mutex_enter(&sigobject_lock);
		if ((uobj = *e->e_sigobject) == NULL) {
			uobj = uao_create(sz, 0);
			(*uobj->pgops->pgo_reference)(uobj);
			va = vm_map_min(kernel_map);
			if ((error = uvm_map(kernel_map, &va, round_page(sz),
			    uobj, 0, 0,
			    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
			    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
				printf("kernel mapping failed %d\n", error);
				(*uobj->pgops->pgo_detach)(uobj);
				mutex_exit(&sigobject_lock);
				return (error);
			}
			memcpy((void *)va, e->e_sigcode, sz);
#ifdef PMAP_NEED_PROCWR
			pmap_procwr(&proc0, va, sz);
#endif
			uvm_unmap(kernel_map, va, va + round_page(sz));
			*e->e_sigobject = uobj;
		}
		mutex_exit(&sigobject_lock);
	}

	/* Just a hint to uvm_map where to put it. */
	va = e->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr,
	    round_page(sz));

#ifdef __alpha__
	/*
	 * Tru64 puts /sbin/loader at the end of user virtual memory,
	 * which causes the above calculation to put the sigcode at
	 * an invalid address.  Put it just below the text instead.
	 */
	if (va == (vaddr_t)vm_map_max(&p->p_vmspace->vm_map)) {
		va = (vaddr_t)p->p_vmspace->vm_taddr - round_page(sz);
	}
#endif

	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz),
	    uobj, 0, 0,
	    UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		DPRINTF(("%s, %d: map %p "
		    "uvm_map %#"PRIxVSIZE"@%#"PRIxVADDR" failed %d\n",
		    __func__, __LINE__, &p->p_vmspace->vm_map, round_page(sz),
		    va, error));
		(*uobj->pgops->pgo_detach)(uobj);
		return (error);
	}
	p->p_sigctx.ps_sigcode = (void *)va;
	return (0);
}
