/*	$NetBSD: kern_exec.c,v 1.279 2008/10/15 06:51:20 wrstuden Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (C) 1993, 1994, 1996 Christopher G. Demetriou
 * Copyright (C) 1992 Wolfgang Solfrank.
 * Copyright (C) 1992 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_exec.c,v 1.279 2008/10/15 06:51:20 wrstuden Exp $");

#include "opt_ktrace.h"
#include "opt_syscall_debug.h"
#include "opt_compat_netbsd.h"
#include "veriexec.h"
#include "opt_pax.h"
#include "opt_sa.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/acct.h>
#include <sys/exec.h>
#include <sys/ktrace.h>
#include <sys/uidinfo.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/ras.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/kauth.h>
#include <sys/lwpctl.h>
#include <sys/pax.h>
#include <sys/cpu.h>

#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscallargs.h>
#if NVERIEXEC > 0
#include <sys/verified_exec.h>
#endif /* NVERIEXEC > 0 */

#include <uvm/uvm_extern.h>

#include <machine/reg.h>

#include <compat/common/compat_util.h>

static int exec_sigcode_map(struct proc *, const struct emul *);

#ifdef DEBUG_EXEC
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif /* DEBUG_EXEC */
/*
 * Exec function switch:
 *
 * Note that each makecmds function is responsible for loading the
 * exec package with the necessary functions for any exec-type-specific
 * handling.
 *
 * Functions for specific exec types should be defined in their own
 * header file.
 */
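
/*
 * Editorial sketch of the es_makecmds contract, as it is exercised by
 * check_exec() below (descriptive only, not normative):
 *
 *	- on success, return 0 and fill in epp->ep_vmcmds (the commands
 *	  that will build the new address space), epp->ep_entry, and the
 *	  ep_taddr/ep_tsize, ep_daddr/ep_dsize and ep_ssize fields;
 *	- to decline an image that may belong to another exec format,
 *	  return ENOEXEC so that check_exec() tries the next execsw[]
 *	  entry;
 *	- any other error ends up being reported as the first
 *	  "interesting" error code of the search.
 */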
extern const struct execsw	execsw_builtin[];
extern int			nexecs_builtin;
static const struct execsw	**execsw = NULL;
static int			nexecs;

u_int	exec_maxhdrsz;	/* must not be static - netbsd32 needs it */

#ifdef LKM
/* list of supported emulations */
static
LIST_HEAD(emlist_head, emul_entry) el_head = LIST_HEAD_INITIALIZER(el_head);
struct emul_entry {
	LIST_ENTRY(emul_entry)	el_list;
	const struct emul	*el_emul;
	int			ro_entry;
};

/* list of dynamically loaded execsw entries */
static
LIST_HEAD(execlist_head, exec_entry) ex_head = LIST_HEAD_INITIALIZER(ex_head);
struct exec_entry {
	LIST_ENTRY(exec_entry)	ex_list;
	const struct execsw	*es;
};
/* structure used for building execsw[] */
struct execsw_entry {
	struct execsw_entry	*next;
	const struct execsw	*es;
};
#endif /* LKM */

#ifdef SYSCALL_DEBUG
extern const char * const syscallnames[];
#endif

#ifdef COMPAT_16
extern char	sigcode[], esigcode[];
struct uvm_object *emul_netbsd_object;
#endif

#ifndef __HAVE_SYSCALL_INTERN
void	syscall(void);
#endif

static const struct sa_emul saemul_netbsd = {
	sizeof(ucontext_t),
	sizeof(struct sa_t),
	sizeof(struct sa_t *),
	NULL,
	NULL,
	cpu_upcall,
	(void (*)(struct lwp *, void *))getucontext_sa,
#ifdef KERN_SA
	sa_ucsp
#else
	NULL
#endif
};

/* NetBSD emul struct */
const struct emul emul_netbsd = {
	"netbsd",
	NULL,		/* emulation path */
#ifndef __HAVE_MINIMAL_EMUL
	EMUL_HAS_SYS___syscall,
	NULL,
	SYS_syscall,
	SYS_NSYSENT,
#endif
	sysent,
#ifdef SYSCALL_DEBUG
	syscallnames,
#else
	NULL,
#endif
	sendsig,
	trapsignal,
	NULL,
#ifdef COMPAT_16
	sigcode,
	esigcode,
	&emul_netbsd_object,
#else
	NULL,
	NULL,
	NULL,
#endif
	setregs,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
#ifdef __HAVE_SYSCALL_INTERN
	syscall_intern,
#else
	syscall,
#endif
	NULL,
	NULL,

	uvm_default_mapaddr,
	NULL,
	&saemul_netbsd,
	sizeof(ucontext_t),
	startlwp,
};

#ifdef LKM
/*
 * Exec lock.  Used to control access to execsw[] structures.
 * This must not be static so that netbsd32 can access it, too.
 */
krwlock_t exec_lock;

static void link_es(struct execsw_entry **, const struct execsw *);
#endif /* LKM */

static kmutex_t sigobject_lock;

static void *
exec_pool_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, NCARGS, 0,
	    UVM_KMF_PAGEABLE | UVM_KMF_WAITVA);
}

static void
exec_pool_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, NCARGS, UVM_KMF_PAGEABLE);
}

static struct pool exec_pool;

static struct pool_allocator exec_palloc = {
	.pa_alloc = exec_pool_alloc,
	.pa_free = exec_pool_free,
	.pa_pagesz = NCARGS
};
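
/*
 * Editorial note: with pa_pagesz set to NCARGS, each pool "page" is a
 * single NCARGS-sized, pageable chunk of kernel VA, so every pool_get()
 * in execve1() below hands out one complete argument buffer.  The pool
 * merely caches these buffers, avoiding a uvm_km_alloc()/uvm_km_free()
 * round trip on every exec.
 */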

/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	lwp pointer of exec'ing lwp
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, executable's vnode (unlocked).
 *
 * EXEC SWITCH ENTRY:
 *	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, executable's vnode (unlocked).
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, executable's vnode (unlocked),
 *			exec header unmodified.
 */
int
/*ARGSUSED*/
check_exec(struct lwp *l, struct exec_package *epp)
{
	int		error, i;
	struct vnode	*vp;
	struct nameidata *ndp;
	size_t		resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME | TRYEMULROOT;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return error;
	epp->ep_vp = vp = ndp->ni_vp;

	/* check access and type */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto bad1;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, l->l_cred)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, l->l_cred)) != 0)
		goto bad1;

	/* unlock vp, since we need it unlocked from here on out. */
	VOP_UNLOCK(vp, 0);

#if NVERIEXEC > 0
	error = veriexec_verify(l, vp, ndp->ni_cnd.cn_pnbuf,
	    epp->ep_flags & EXEC_INDIR ? VERIEXEC_INDIRECT : VERIEXEC_DIRECT,
	    NULL);
	if (error)
		goto bad2;
#endif /* NVERIEXEC > 0 */

#ifdef PAX_SEGVGUARD
	error = pax_segvguard(l, vp, ndp->ni_cnd.cn_pnbuf, false);
	if (error)
		goto bad2;
#endif /* PAX_SEGVGUARD */

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, l->l_cred, &resid, NULL);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * Set up default address space limits.  Can be overridden
	 * by individual exec packages.
	 *
	 * XXX probably should be all done in the exec packages.
	 */
	epp->ep_vm_minaddr = VM_MIN_ADDRESS;
	epp->ep_vm_maxaddr = VM_MAXUSER_ADDRESS;
	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs; i++) {
		int newerror;

		epp->ep_esch = execsw[i];
		newerror = (*execsw[i]->es_makecmds)(l, epp);

		if (!newerror) {
			/* Seems ok: check that entry point is sane */
			if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
				error = ENOEXEC;
				break;
			}

			/* check limits */
			if ((epp->ep_tsize > MAXTSIZ) ||
			    (epp->ep_dsize > (u_quad_t)l->l_proc->p_rlimit
			    [RLIMIT_DATA].rlim_cur)) {
				error = ENOMEM;
				break;
			}
			return 0;
		}

		if (epp->ep_emul_root != NULL) {
			vrele(epp->ep_emul_root);
			epp->ep_emul_root = NULL;
		}
		if (epp->ep_interp != NULL) {
			vrele(epp->ep_interp);
			epp->ep_interp = NULL;
		}

		/* make sure the first "interesting" error code is saved. */
		if (error == ENOEXEC)
			error = newerror;

		if (epp->ep_flags & EXEC_DESTR)
			/* Error from "#!" code, tidied up by recursive call */
			return error;
	}

	/* not found, error */

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close and release the vnode, restore the old one, free the
	 * pathname buf, and punt.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(vp, FREAD, l->l_cred);
	vput(vp);
	PNBUF_PUT(ndp->ni_cnd.cn_pnbuf);
	return error;

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	vput(vp);			/* was still locked */
	PNBUF_PUT(ndp->ni_cnd.cn_pnbuf);
	return error;
}

#ifdef __MACHINE_STACK_GROWS_UP
#define STACK_PTHREADSPACE NBPG
#else
#define STACK_PTHREADSPACE 0
#endif

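/*
 * Editorial note: execve1() takes the element-fetch routine as a
 * parameter rather than calling copyin() directly, presumably so that
 * compat code (e.g. netbsd32, which deals in 32-bit user pointers) can
 * supply its own fetcher.  The native version below simply copies the
 * index'th pointer out of the user-space argv/envp array.
 */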
static int
execve_fetch_element(char * const *array, size_t index, char **value)
{
	return copyin(array + index, value, sizeof(*value));
}

/*
 * exec system call
 */
/* ARGSUSED */
int
sys_execve(struct lwp *l, const struct sys_execve_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *)	path;
		syscallarg(char * const *)	argp;
		syscallarg(char * const *)	envp;
	} */

	return execve1(l, SCARG(uap, path), SCARG(uap, argp),
	    SCARG(uap, envp), execve_fetch_element);
}

int
execve1(struct lwp *l, const char *path, char * const *args,
    char * const *envs, execve_fetch_element_t fetch_element)
{
	int			error;
	struct exec_package	pack;
	struct nameidata	nid;
	struct vattr		attr;
	struct proc		*p;
	char			*argp;
	char			*dp, *sp;
	long			argc, envc;
	size_t			i, len;
	char			*stack;
	struct ps_strings	arginfo;
	struct ps_strings	*aip = &arginfo;
	struct vmspace		*vm;
	struct exec_fakearg	*tmpfap;
	int			szsigcode;
	struct exec_vmcmd	*base_vcp;
	int			oldlwpflags;
	ksiginfo_t		ksi;
	ksiginfoq_t		kq;
	char			*pathbuf;
	size_t			pathbuflen;

	p = l->l_proc;

	/*
	 * Check if we have exceeded our number of processes limit.
	 * This is so that we handle the case where a root daemon
	 * forked, ran setuid to become the desired user and is trying
	 * to exec. The obvious place to do the reference counting check
	 * is setuid(), but we don't do the reference counting check there
	 * like other OS's do because then all the programs that use setuid()
	 * must be modified to check the return code of setuid() and exit().
	 * It is dangerous to make setuid() fail, because it fails open and
	 * the program will continue to run as root. If we make it succeed
	 * and return an error code, again we are not enforcing the limit.
	 * The best place to enforce the limit is here, when the process tries
	 * to execute a new image, because eventually the process will need
	 * to call exec in order to do something useful.
	 */

	if ((p->p_flag & PK_SUGID) &&
	    chgproccnt(kauth_cred_getuid(l->l_cred), 0) >
	    p->p_rlimit[RLIMIT_NPROC].rlim_cur)
		return EAGAIN;

	oldlwpflags = l->l_flag & (LW_SA | LW_SA_UPCALL);
	if (l->l_flag & LW_SA) {
		lwp_lock(l);
		l->l_flag &= ~(LW_SA | LW_SA_UPCALL);
		lwp_unlock(l);
	}

	/*
	 * Drain existing references and forbid new ones.  The process
	 * should be left alone until we're done here.  This is necessary
	 * to avoid race conditions - e.g. in ptrace() - that might allow
	 * a local user to illicitly obtain elevated privileges.
	 */
	rw_enter(&p->p_reflock, RW_WRITER);

	base_vcp = NULL;
	/*
	 * Initialize the namei data to point at the user's program name.
	 * This is done here rather than in check_exec(), so that it's
	 * possible to override these settings if any of the makecmds/probe
	 * functions call check_exec() recursively - for example,
	 * see exec_script_makecmds().
	 */
	pathbuf = PNBUF_GET();
	error = copyinstr(path, pathbuf, MAXPATHLEN, &pathbuflen);
	if (error) {
		DPRINTF(("execve: copyinstr path %d\n", error));
		goto clrflg;
	}

	NDINIT(&nid, LOOKUP, NOFOLLOW | TRYEMULROOT, UIO_SYSSPACE, pathbuf);

	/*
	 * initialize the fields of the exec package.
	 */
	pack.ep_name = path;
	pack.ep_hdr = kmem_alloc(exec_maxhdrsz, KM_SLEEP);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_emul_arg = NULL;
	pack.ep_vmcmds.evs_cnt = 0;
	pack.ep_vmcmds.evs_used = 0;
	pack.ep_vap = &attr;
	pack.ep_flags = 0;
	pack.ep_emul_root = NULL;
	pack.ep_interp = NULL;
	pack.ep_esch = NULL;
	pack.ep_pax_flags = 0;

#ifdef LKM
	rw_enter(&exec_lock, RW_READER);
#endif

	/* see if we can run it. */
	if ((error = check_exec(l, &pack)) != 0) {
		if (error != ENOENT) {
			DPRINTF(("execve: check exec failed %d\n", error));
		}
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = pool_get(&exec_pool, PR_WAITOK);
	KASSERT(argp != NULL);
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (tmpfap->fa_arg != NULL) {
			const char *cp;

			cp = tmpfap->fa_arg;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			kmem_free(tmpfap->fa_arg, tmpfap->fa_len);
			tmpfap++; argc++;
		}
		kmem_free(pack.ep_fa, pack.ep_fa_len);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (args == NULL) {
		DPRINTF(("execve: null args\n"));
		error = EINVAL;
		goto bad;
	}
	/* 'i' will index the argp/envp element to be retrieved */
	i = 0;
	if (pack.ep_flags & EXEC_SKIPARG)
		i++;

	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = (*fetch_element)(args, i, &sp)) != 0) {
			DPRINTF(("execve: fetch_element args %d\n", error));
			goto bad;
		}
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			DPRINTF(("execve: copyinstr args %d\n", error));
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		ktrexecarg(dp, len - 1);
		dp += len;
		i++;
		argc++;
	}

	envc = 0;
	/* environment need not be there */
	if (envs != NULL) {
		i = 0;
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = (*fetch_element)(envs, i, &sp)) != 0) {
				DPRINTF(("execve: fetch_element env %d\n", error));
				goto bad;
			}
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				DPRINTF(("execve: copyinstr env %d\n", error));
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			ktrexecenv(dp, len - 1);
			dp += len;
			i++;
			envc++;
		}
	}

	dp = (char *) ALIGN(dp);

	szsigcode = pack.ep_esch->es_emul->e_esigcode -
	    pack.ep_esch->es_emul->e_sigcode;

#ifdef __MACHINE_STACK_GROWS_UP
/* See big comment lower down */
#define	RTLD_GAP	32
#else
#define	RTLD_GAP	0
#endif
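
/*
 * Editorial note on the length computation below: the new stack must
 * hold one pointer slot for each argv and envp entry plus their two
 * terminating NULLs (argc + envc + 2), es_arglen extra slots for
 * emulation-specific data (e.g. the ELF auxiliary vector), the argc
 * word itself (sizeof(int)), the argument and environment strings
 * copied in so far (dp - argp), the RTLD_GAP, the signal trampoline,
 * the ps_strings structure, and the pthread reservation.  EXEC_32
 * packages use 32-bit pointer slots for the same layout.
 */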

	/* Now check if args & environ fit into new stack */
	if (pack.ep_flags & EXEC_32)
		len = ((argc + envc + 2 + pack.ep_esch->es_arglen) *
		    sizeof(int) + sizeof(int) + dp + RTLD_GAP +
		    szsigcode + sizeof(struct ps_strings) + STACK_PTHREADSPACE)
		    - argp;
	else
		len = ((argc + envc + 2 + pack.ep_esch->es_arglen) *
		    sizeof(char *) + sizeof(int) + dp + RTLD_GAP +
		    szsigcode + sizeof(struct ps_strings) + STACK_PTHREADSPACE)
		    - argp;

#ifdef PAX_ASLR
	if (pax_aslr_active(l))
		len += (arc4random() % PAGE_SIZE);
#endif /* PAX_ASLR */

#ifdef STACKALIGN	/* arm, etc. */
	len = STACKALIGN(len);	/* make the stack "safely" aligned */
#else
	len = ALIGN(len);	/* make the stack "safely" aligned */
#endif

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		DPRINTF(("execve: stack limit exceeded %zu\n", len));
		error = ENOMEM;
		goto bad;
	}

	/* Get rid of other LWPs. */
	if (p->p_sa || p->p_nlwps > 1) {
		mutex_enter(p->p_lock);
		exit_lwps(l);
		mutex_exit(p->p_lock);
	}
	KDASSERT(p->p_nlwps == 1);

	/* Destroy any lwpctl info. */
	if (p->p_lwpctl != NULL)
		lwp_ctl_exit();

	/* This is now LWP 1 */
	l->l_lid = 1;
	p->p_nlwpid = 1;

#ifdef KERN_SA
	/* Release any SA state. */
	if (p->p_sa)
		sa_release(p);
#endif /* KERN_SA */

	/* Remove POSIX timers */
	timers_free(p, TIMERS_POSIX);

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * Do whatever is necessary to prepare the address space
	 * for remapping.  Note that this might replace the current
	 * vmspace with another!
	 */
	uvmspace_exec(l, pack.ep_vm_minaddr, pack.ep_vm_maxaddr);

	/* record proc's vnode, for use by procfs and others */
	if (p->p_textvp)
		vrele(p->p_textvp);
	VREF(pack.ep_vp);
	p->p_textvp = pack.ep_vp;

	/* Now map address space */
	vm = p->p_vmspace;
	vm->vm_taddr = (void *)pack.ep_taddr;
	vm->vm_tsize = btoc(pack.ep_tsize);
	vm->vm_daddr = (void*)pack.ep_daddr;
	vm->vm_dsize = btoc(pack.ep_dsize);
	vm->vm_ssize = btoc(pack.ep_ssize);
	vm->vm_maxsaddr = (void *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (void *)pack.ep_minsaddr;

#ifdef PAX_ASLR
	pax_aslr_init(l, vm);
#endif /* PAX_ASLR */

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	for (i = 0; i < pack.ep_vmcmds.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &pack.ep_vmcmds.evs_cmds[i];
		if (vcp->ev_flags & VMCMD_RELATIVE) {
#ifdef DIAGNOSTIC
			if (base_vcp == NULL)
				panic("execve: relative vmcmd with no base");
			if (vcp->ev_flags & VMCMD_BASE)
				panic("execve: illegal base & relative vmcmd");
#endif
			vcp->ev_addr += base_vcp->ev_addr;
		}
		error = (*vcp->ev_proc)(l, vcp);
#ifdef DEBUG_EXEC
		if (error) {
			size_t j;
			struct exec_vmcmd *vp = &pack.ep_vmcmds.evs_cmds[0];
			for (j = 0; j <= i; j++)
				uprintf(
				    "vmcmd[%zu] = %#lx/%#lx fd@%#lx prot=0%o flags=%d\n",
				    j, vp[j].ev_addr, vp[j].ev_len,
				    vp[j].ev_offset, vp[j].ev_prot,
				    vp[j].ev_flags);
		}
#endif /* DEBUG_EXEC */
		if (vcp->ev_flags & VMCMD_BASE)
			base_vcp = vcp;
	}

	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);

	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred);
	vput(pack.ep_vp);

	/* if an error happened, deallocate and punt */
	if (error) {
		DPRINTF(("execve: vmcmd %zu failed: %d\n", i - 1, error));
		goto exec_abort;
	}

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

	/* set command name & other accounting info */
	i = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	(void)memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, i);
	p->p_comm[i] = '\0';

	dp = PNBUF_GET();
	/*
	 * If the path starts with /, we don't need to do any work.
	 * This handles the majority of the cases.
	 * In the future perhaps we could canonicalize it?
	 */
	if (pathbuf[0] == '/')
		(void)strlcpy(pack.ep_path = dp, pathbuf, MAXPATHLEN);
#ifdef notyet
	/*
	 * Although this works most of the time [since the entry was just
	 * entered in the cache] we don't use it because it theoretically
	 * can fail and it is not the cleanest interface, because there
	 * could be races. When the namei cache is re-written, this can
	 * be changed to use the appropriate function.
	 */
	else if (!(error = vnode_to_path(dp, MAXPATHLEN, p->p_textvp, l, p)))
		pack.ep_path = dp;
#endif
	else {
#ifdef notyet
		printf("Cannot get path for pid %d [%s] (error %d)",
		    (int)p->p_pid, p->p_comm, error);
#endif
		pack.ep_path = NULL;
		PNBUF_PUT(dp);
	}

	stack = (char *)STACK_ALLOC(STACK_GROW(vm->vm_minsaddr,
		STACK_PTHREADSPACE + sizeof(struct ps_strings) + szsigcode),
		len - (sizeof(struct ps_strings) + szsigcode));

#ifdef __MACHINE_STACK_GROWS_UP
	/*
	 * The copyargs call always copies into lower addresses
	 * first, moving towards higher addresses, starting with
	 * the stack pointer that we give.  When the stack grows
	 * down, this puts argc/argv/envp very shallow on the
	 * stack, right at the first user stack pointer.
	 * When the stack grows up, the situation is reversed.
	 *
	 * Normally, this is no big deal.  But the ld_elf.so _rtld()
	 * function expects to be called with a single pointer to
	 * a region that has a few words it can stash values into,
	 * followed by argc/argv/envp.  When the stack grows down,
	 * it's easy to decrement the stack pointer a little bit to
	 * allocate the space for these few words and pass the new
	 * stack pointer to _rtld.  When the stack grows up, however,
	 * a few words before argc is part of the signal trampoline, XXX
	 * so we have a problem.
	 *
	 * Instead of changing how _rtld works, we take the easy way
	 * out and steal 32 bytes before we call copyargs.
	 * This extra space was allowed for when 'len' was calculated.
	 */
	stack += RTLD_GAP;
#endif /* __MACHINE_STACK_GROWS_UP */

	/* Now copy argc, args & environ to new stack */
	error = (*pack.ep_esch->es_copyargs)(l, &pack, &arginfo, &stack, argp);
	if (pack.ep_path) {
		PNBUF_PUT(pack.ep_path);
		pack.ep_path = NULL;
	}
	if (error) {
		DPRINTF(("execve: copyargs failed %d\n", error));
		goto exec_abort;
	}
	/* Move the stack back to original point */
	stack = (char *)STACK_GROW(vm->vm_minsaddr, len);

	/* fill process ps_strings info */
	p->p_psstr = (struct ps_strings *)
	    STACK_ALLOC(STACK_GROW(vm->vm_minsaddr, STACK_PTHREADSPACE),
	    sizeof(struct ps_strings));
	p->p_psargv = offsetof(struct ps_strings, ps_argvstr);
	p->p_psnargv = offsetof(struct ps_strings, ps_nargvstr);
	p->p_psenv = offsetof(struct ps_strings, ps_envstr);
	p->p_psnenv = offsetof(struct ps_strings, ps_nenvstr);

	/* copy out the process's ps_strings structure */
	if ((error = copyout(aip, (char *)p->p_psstr,
	    sizeof(arginfo))) != 0) {
		DPRINTF(("execve: ps_strings copyout %p->%p size %ld failed\n",
		    aip, (char *)p->p_psstr, (long)sizeof(arginfo)));
		goto exec_abort;
	}

	fd_closeexec();		/* handle close on exec */
	execsigs(p);		/* reset caught signals */

	l->l_ctxlink = NULL;	/* reset ucontext link */


	p->p_acflag &= ~AFORK;
	mutex_enter(p->p_lock);
	p->p_flag |= PK_EXEC;
	mutex_exit(p->p_lock);

	/*
	 * Stop profiling.
	 */
	if ((p->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}

	/*
	 * It's OK to test PL_PPWAIT unlocked here, as other LWPs have
	 * exited and exec()/exit() are the only places it will be cleared.
	 */
	if ((p->p_lflag & PL_PPWAIT) != 0) {
		mutex_enter(proc_lock);
		p->p_lflag &= ~PL_PPWAIT;
		cv_broadcast(&p->p_pptr->p_waitcv);
		mutex_exit(proc_lock);
	}

	/*
	 * Deal with set[ug]id.  MNT_NOSUID has already been used to disable
	 * s[ug]id.  It's OK to check for PSL_TRACED here as we have blocked
	 * out additional references on the process for the moment.
	 */
	if ((p->p_slflag & PSL_TRACED) == 0 &&

	    (((attr.va_mode & S_ISUID) != 0 &&
	      kauth_cred_geteuid(l->l_cred) != attr.va_uid) ||

	     ((attr.va_mode & S_ISGID) != 0 &&
	      kauth_cred_getegid(l->l_cred) != attr.va_gid))) {
		/*
		 * Mark the process as SUGID before we do
		 * anything that might block.
		 */
		proc_crmod_enter();
		proc_crmod_leave(NULL, NULL, true);

		/* Make sure file descriptors 0..2 are in use. */
		if ((error = fd_checkstd()) != 0) {
			DPRINTF(("execve: fd_checkstd failed %d\n", error));
			goto exec_abort;
		}

		/*
		 * Copy the credential so other references don't see our
		 * changes.
		 */
		l->l_cred = kauth_cred_copy(l->l_cred);
#ifdef KTRACE
		/*
		 * If the persistent trace flag isn't set, turn off.
		 */
		if (p->p_tracep) {
			mutex_enter(&ktrace_lock);
			if (!(p->p_traceflag & KTRFAC_PERSISTENT))
				ktrderef(p);
			mutex_exit(&ktrace_lock);
		}
#endif
		if (attr.va_mode & S_ISUID)
			kauth_cred_seteuid(l->l_cred, attr.va_uid);
		if (attr.va_mode & S_ISGID)
			kauth_cred_setegid(l->l_cred, attr.va_gid);
	} else {
		if (kauth_cred_geteuid(l->l_cred) ==
		    kauth_cred_getuid(l->l_cred) &&
		    kauth_cred_getegid(l->l_cred) ==
		    kauth_cred_getgid(l->l_cred))
			p->p_flag &= ~PK_SUGID;
	}

	/*
	 * Copy the credential so other references don't see our changes.
	 * Test to see if this is necessary first, since in the common case
	 * we won't need a private reference.
	 */
	if (kauth_cred_geteuid(l->l_cred) != kauth_cred_getsvuid(l->l_cred) ||
	    kauth_cred_getegid(l->l_cred) != kauth_cred_getsvgid(l->l_cred)) {
		l->l_cred = kauth_cred_copy(l->l_cred);
		kauth_cred_setsvuid(l->l_cred, kauth_cred_geteuid(l->l_cred));
		kauth_cred_setsvgid(l->l_cred, kauth_cred_getegid(l->l_cred));
	}

	/* Update the master credentials. */
	if (l->l_cred != p->p_cred) {
		kauth_cred_t ocred;

		kauth_cred_hold(l->l_cred);
		mutex_enter(p->p_lock);
		ocred = p->p_cred;
		p->p_cred = l->l_cred;
		mutex_exit(p->p_lock);
		kauth_cred_free(ocred);
	}

#if defined(__HAVE_RAS)
	/*
	 * Remove all RASs from the address space.
	 */
	ras_purgeall();
#endif

	doexechooks(p);

	/* setup new registers and do misc. setup. */
	(*pack.ep_esch->es_emul->e_setregs)(l, &pack, (u_long) stack);
	if (pack.ep_esch->es_setregs)
		(*pack.ep_esch->es_setregs)(l, &pack, (u_long) stack);

	/* map the process's signal trampoline code */
	if ((error = exec_sigcode_map(p, pack.ep_esch->es_emul)) != 0) {
		DPRINTF(("execve: map sigcode failed %d\n", error));
		goto exec_abort;
	}

	pool_put(&exec_pool, argp);

	PNBUF_PUT(nid.ni_cnd.cn_pnbuf);

	/* notify others that we exec'd */
	KNOTE(&p->p_klist, NOTE_EXEC);

	kmem_free(pack.ep_hdr, pack.ep_hdrlen);

	/*
	 * The emulation root will usually have been found when we looked
	 * for the elf interpreter (or similar), if not look now.
	 */
	if (pack.ep_esch->es_emul->e_path != NULL && pack.ep_emul_root == NULL)
		emul_find_root(l, &pack);
	/* Any old emulation root got removed by fd_closeexec() */
	rw_enter(&p->p_cwdi->cwdi_lock, RW_WRITER);
	p->p_cwdi->cwdi_edir = pack.ep_emul_root;
	rw_exit(&p->p_cwdi->cwdi_lock);
	pack.ep_emul_root = NULL;
	if (pack.ep_interp != NULL)
		vrele(pack.ep_interp);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing a process of a different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of the new emulation. If the emulation
	 * is the same, the exec hook code should deallocate any old
	 * emulation resources held previously by this process.
	 */
	if (p->p_emul && p->p_emul->e_proc_exit
	    && p->p_emul != pack.ep_esch->es_emul)
		(*p->p_emul->e_proc_exit)(p);

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_esch->es_emul->e_proc_exec)
		(*pack.ep_esch->es_emul->e_proc_exec)(p, &pack);

	/* update p_emul, the old value is no longer needed */
	p->p_emul = pack.ep_esch->es_emul;

	/* ...and the same for p_execsw */
	p->p_execsw = pack.ep_esch;

#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif
	ktremul();

	/* Allow new references from the debugger/procfs. */
	rw_exit(&p->p_reflock);
#ifdef LKM
	rw_exit(&exec_lock);
#endif

	mutex_enter(proc_lock);

	if ((p->p_slflag & (PSL_TRACED|PSL_SYSCALL)) == PSL_TRACED) {
		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l->l_lid;
		kpsignal(p, &ksi, NULL);
	}

	if (p->p_sflag & PS_STOPEXEC) {
		KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
		p->p_pptr->p_nstopchild++;
		p->p_pptr->p_waited = 0;
		mutex_enter(p->p_lock);
		ksiginfo_queue_init(&kq);
		sigclearall(p, &contsigmask, &kq);
		lwp_lock(l);
		l->l_stat = LSSTOP;
		p->p_stat = SSTOP;
		p->p_nrlwps--;
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		mi_switch(l);
		ksiginfo_queue_drain(&kq);
		KERNEL_LOCK(l->l_biglocks, l);
	} else {
		mutex_exit(proc_lock);
	}

	PNBUF_PUT(pathbuf);
	return (EJUSTRETURN);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fd_close(pack.ep_fd);
	}
	/* close and put the exec'd file */
	vn_lock(pack.ep_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(pack.ep_vp, FREAD, l->l_cred);
	vput(pack.ep_vp);
	PNBUF_PUT(nid.ni_cnd.cn_pnbuf);
	pool_put(&exec_pool, argp);

freehdr:
	kmem_free(pack.ep_hdr, pack.ep_hdrlen);
	if (pack.ep_emul_root != NULL)
		vrele(pack.ep_emul_root);
	if (pack.ep_interp != NULL)
		vrele(pack.ep_interp);

#ifdef LKM
	rw_exit(&exec_lock);
#endif

clrflg:
	lwp_lock(l);
	l->l_flag |= oldlwpflags;
	lwp_unlock(l);
	PNBUF_PUT(pathbuf);
	rw_exit(&p->p_reflock);

	return error;

exec_abort:
	PNBUF_PUT(pathbuf);
	rw_exit(&p->p_reflock);
#ifdef LKM
	rw_exit(&exec_lock);
#endif

	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
		VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_emul_arg)
		FREE(pack.ep_emul_arg, M_TEMP);
	PNBUF_PUT(nid.ni_cnd.cn_pnbuf);
	pool_put(&exec_pool, argp);
	kmem_free(pack.ep_hdr, pack.ep_hdrlen);
	if (pack.ep_emul_root != NULL)
		vrele(pack.ep_emul_root);
	if (pack.ep_interp != NULL)
		vrele(pack.ep_interp);

	/* Acquire the sched-state mutex (exit1() will release it). */
	mutex_enter(p->p_lock);
	exit1(l, W_EXITCODE(error, SIGABRT));

	/* NOTREACHED */
	return 0;
}

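/*
 * Editorial sketch of the stack image that the native copyargs() below
 * builds, growing from *stackp towards higher addresses (the es_arglen
 * slots are left for emulation-specific data such as the ELF auxiliary
 * vector, which an ELF-specific copyargs wrapper would fill in):
 *
 *	argc
 *	argv[0] .. argv[argc-1], NULL
 *	envp[0] .. envp[envc-1], NULL
 *	es_arglen pointer-sized slots
 *	the argument and environment strings themselves
 */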
int
copyargs(struct lwp *l, struct exec_package *pack, struct ps_strings *arginfo,
    char **stackp, void *argp)
{
	char	**cpp, *dp, *sp;
	size_t	len;
	void	*nullp;
	long	argc, envc;
	int	error;

	cpp = (char **)*stackp;
	nullp = NULL;
	argc = arginfo->ps_nargvstr;
	envc = arginfo->ps_nenvstr;
	if ((error = copyout(&argc, cpp++, sizeof(argc))) != 0)
		return error;

	dp = (char *) (cpp + argc + envc + 2 + pack->ep_esch->es_arglen);
	sp = argp;

	/* XXX don't copy them out, remap them! */
	arginfo->ps_argvstr = cpp; /* remember location of argv for later */

	for (; --argc >= 0; sp += len, dp += len)
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0 ||
		    (error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0)
			return error;

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0)
		return error;

	arginfo->ps_envstr = cpp; /* remember location of envp for later */

	for (; --envc >= 0; sp += len, dp += len)
		if ((error = copyout(&dp, cpp++, sizeof(dp))) != 0 ||
		    (error = copyoutstr(sp, dp, ARG_MAX, &len)) != 0)
			return error;

	if ((error = copyout(&nullp, cpp++, sizeof(nullp))) != 0)
		return error;

	*stackp = (char *)cpp;
	return 0;
}

#ifdef LKM
/*
 * Find an emulation of given name in list of emulations.
 * Needs to be called with the exec_lock held.
 */
const struct emul *
emul_search(const char *name)
{
	struct emul_entry *it;

	LIST_FOREACH(it, &el_head, el_list) {
		if (strcmp(name, it->el_emul->e_name) == 0)
			return it->el_emul;
	}

	return NULL;
}

/*
 * Add an emulation to list, if it's not there already.
 */
int
emul_register(const struct emul *emul, int ro_entry)
{
	struct emul_entry	*ee;
	int			error;

	error = 0;
	rw_enter(&exec_lock, RW_WRITER);

	if (emul_search(emul->e_name)) {
		error = EEXIST;
		goto out;
	}

	ee = kmem_alloc(sizeof(*ee), KM_SLEEP);
	ee->el_emul = emul;
	ee->ro_entry = ro_entry;
	LIST_INSERT_HEAD(&el_head, ee, el_list);

out:
	rw_exit(&exec_lock);
	return error;
}
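
/*
 * Editorial note: a compat LKM would typically pair these calls at
 * load/unload time - emul_register() plus exec_add() for each of its
 * execsw entries on the way in, and exec_remove() plus emul_unregister()
 * on the way out.  The exact module glue lives outside this file.
 */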

/*
 * Remove emulation with name 'name' from list of supported emulations.
 */
int
emul_unregister(const char *name)
{
	const struct proclist_desc *pd;
	struct emul_entry	*it;
	int			i, error;
	struct proc		*ptmp;

	error = 0;
	rw_enter(&exec_lock, RW_WRITER);

	LIST_FOREACH(it, &el_head, el_list) {
		if (strcmp(it->el_emul->e_name, name) == 0)
			break;
	}

	if (!it) {
		error = ENOENT;
		goto out;
	}

	if (it->ro_entry) {
		error = EBUSY;
		goto out;
	}

	/* test if any execsw[] entry is still using this */
	for (i = 0; i < nexecs; i++) {
		if (execsw[i]->es_emul == it->el_emul) {
			error = EBUSY;
			goto out;
		}
	}

	/*
	 * Test if any process is running under this emulation - since
	 * emul_unregister() runs quite seldom, it's better to do an
	 * expensive check here than to use any locking.
	 */
	mutex_enter(proc_lock);
	for (pd = proclists; pd->pd_list != NULL && !error; pd++) {
		PROCLIST_FOREACH(ptmp, pd->pd_list) {
			if (ptmp->p_emul == it->el_emul) {
				error = EBUSY;
				break;
			}
		}
	}
	mutex_exit(proc_lock);

	if (error)
		goto out;


	/* entry is not used, remove it */
	LIST_REMOVE(it, el_list);
	kmem_free(it, sizeof(*it));

out:
	rw_exit(&exec_lock);
	return error;
}

/*
 * Add execsw[] entry.
 */
int
exec_add(struct execsw *esp, const char *e_name)
{
	struct exec_entry	*it;
	int			error;

	error = 0;
	rw_enter(&exec_lock, RW_WRITER);

	if (!esp->es_emul) {
		esp->es_emul = emul_search(e_name);
		if (!esp->es_emul) {
			error = ENOENT;
			goto out;
		}
	}

	LIST_FOREACH(it, &ex_head, ex_list) {
		/* assume tuple (makecmds, probe_func, emulation) is unique */
		if (it->es->es_makecmds == esp->es_makecmds
		    && it->es->u.elf_probe_func == esp->u.elf_probe_func
		    && it->es->es_emul == esp->es_emul) {
			error = EEXIST;
			goto out;
		}
	}

	/* if we got here, the entry doesn't exist yet */
	it = kmem_alloc(sizeof(*it), KM_SLEEP);
	it->es = esp;
	LIST_INSERT_HEAD(&ex_head, it, ex_list);

	/* update execsw[] */
	exec_init(0);

out:
	rw_exit(&exec_lock);
	return error;
}

/*
 * Remove execsw[] entry.
 */
int
exec_remove(const struct execsw *esp)
{
	struct exec_entry	*it;
	int			error;

	error = 0;
	rw_enter(&exec_lock, RW_WRITER);

	LIST_FOREACH(it, &ex_head, ex_list) {
		/* assume tuple (makecmds, probe_func, emulation) is unique */
		if (it->es->es_makecmds == esp->es_makecmds
		    && it->es->u.elf_probe_func == esp->u.elf_probe_func
		    && it->es->es_emul == esp->es_emul)
			break;
	}
	if (!it) {
		error = ENOENT;
		goto out;
	}

	/* remove item from list and free resources */
	LIST_REMOVE(it, ex_list);
	kmem_free(it, sizeof(*it));

	/* update execsw[] */
	exec_init(0);

out:
	rw_exit(&exec_lock);
	return error;
}

static void
link_es(struct execsw_entry **listp, const struct execsw *esp)
{
	struct execsw_entry *et, *e1;

	et = (struct execsw_entry *) malloc(sizeof(struct execsw_entry),
	    M_TEMP, M_WAITOK);
	et->next = NULL;
	et->es = esp;
	if (*listp == NULL) {
		*listp = et;
		return;
	}

	switch (et->es->es_prio) {
	case EXECSW_PRIO_FIRST:
		/* put new entry as the first */
		et->next = *listp;
		*listp = et;
		break;
	case EXECSW_PRIO_ANY:
		/* put new entry after all *_FIRST and *_ANY entries */
		for (e1 = *listp; e1->next
		    && e1->next->es->es_prio != EXECSW_PRIO_LAST;
		    e1 = e1->next);
		et->next = e1->next;
		e1->next = et;
		break;
	case EXECSW_PRIO_LAST:
		/* put new entry as the last one */
		for (e1 = *listp; e1->next; e1 = e1->next);
		e1->next = et;
		break;
	default:
#ifdef DIAGNOSTIC
		panic("execsw[] entry with unknown priority %d found",
		    et->es->es_prio);
#else
		free(et, M_TEMP);
#endif
		break;
	}
}

/*
 * Initialize exec structures. If init_boot is true, also does necessary
 * one-time initialization (it's called from main() that way).
 * Once system is multiuser, this should be called with exec_lock held,
 * i.e. via exec_{add|remove}().
 */
int
exec_init(int init_boot)
{
	const struct execsw	**new_es, * const *old_es;
	struct execsw_entry	*list, *e1;
	struct exec_entry	*e2;
	int			i, es_sz;

	if (init_boot) {
		/* do one-time initializations */
		rw_init(&exec_lock);
		mutex_init(&sigobject_lock, MUTEX_DEFAULT, IPL_NONE);
		pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH,
		    "execargs", &exec_palloc, IPL_NONE);
		pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0);

		/* register compiled-in emulations */
		for (i = 0; i < nexecs_builtin; i++) {
			if (execsw_builtin[i].es_emul)
				emul_register(execsw_builtin[i].es_emul, 1);
		}
#ifdef DIAGNOSTIC
		if (i == 0)
			panic("no emulations found in execsw_builtin[]");
#endif
	}

	/*
	 * Build execsw[] array from builtin entries and entries added
	 * at runtime.
	 */
	list = NULL;
	for (i = 0; i < nexecs_builtin; i++)
		link_es(&list, &execsw_builtin[i]);

	/* Add dynamically loaded entries */
	es_sz = nexecs_builtin;
	LIST_FOREACH(e2, &ex_head, ex_list) {
		link_es(&list, e2->es);
		es_sz++;
	}

	/*
	 * Now that we have sorted all execsw entries, create new execsw[]
	 * and free no longer needed memory in the process.
	 */
	new_es = kmem_alloc(es_sz * sizeof(struct execsw *), KM_SLEEP);
	for (i = 0; list; i++) {
		new_es[i] = list->es;
		e1 = list->next;
		free(list, M_TEMP);
		list = e1;
	}

	/*
	 * New execsw[] array built, now replace old execsw[] and free
	 * used memory.
	 */
	old_es = execsw;
	if (old_es)
		/*XXXUNCONST*/
		kmem_free(__UNCONST(old_es), nexecs * sizeof(struct execsw *));
	execsw = new_es;
	nexecs = es_sz;

	/*
	 * Figure out the maximum size of an exec header.
	 */
	exec_maxhdrsz = 0;
	for (i = 0; i < nexecs; i++) {
		if (execsw[i]->es_hdrsz > exec_maxhdrsz)
			exec_maxhdrsz = execsw[i]->es_hdrsz;
	}

	return 0;
}
#endif

#ifndef LKM
/*
 * Simplified exec_init() for kernels without LKMs. Only initialize
 * exec_maxhdrsz and execsw[].
 */
int
exec_init(int init_boot)
{
	int	i;

#ifdef DIAGNOSTIC
	if (!init_boot)
		panic("exec_init(): called with init_boot == 0");
#endif

	/* do one-time initializations */
	nexecs = nexecs_builtin;
	execsw = kmem_alloc(nexecs * sizeof(struct execsw *), KM_SLEEP);

	pool_init(&exec_pool, NCARGS, 0, 0, PR_NOALIGN|PR_NOTOUCH,
	    "execargs", &exec_palloc, IPL_NONE);
	pool_sethardlimit(&exec_pool, maxexec, "should not happen", 0);

	/*
	 * Fill in execsw[] and figure out the maximum size of an exec header.
	 */
	exec_maxhdrsz = 0;
	for (i = 0; i < nexecs; i++) {
		execsw[i] = &execsw_builtin[i];
		if (execsw_builtin[i].es_hdrsz > exec_maxhdrsz)
			exec_maxhdrsz = execsw_builtin[i].es_hdrsz;
	}

	return 0;
}
#endif /* !LKM */

static int
exec_sigcode_map(struct proc *p, const struct emul *e)
{
	vaddr_t va;
	vsize_t sz;
	int error;
	struct uvm_object *uobj;

	sz = (vaddr_t)e->e_esigcode - (vaddr_t)e->e_sigcode;

	if (e->e_sigobject == NULL || sz == 0) {
		return 0;
	}

	/*
	 * If we don't have a sigobject for this emulation, create one.
	 *
	 * sigobject is an anonymous memory object (just like SYSV shared
	 * memory) that we keep a permanent reference to and that we map
	 * in all processes that need this sigcode. The creation is simple,
	 * we create an object, add a permanent reference to it, map it in
	 * kernel space, copy out the sigcode to it and unmap it.
	 * We map it with PROT_READ|PROT_EXEC into the process just
	 * the way sys_mmap() would map it.
	 */

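	/*
	 * Editorial note: this is a double-checked initialization - the
	 * unlocked read of *e->e_sigobject is re-done under sigobject_lock
	 * before the object is created, so concurrent execs end up
	 * creating the per-emulation sigobject only once.
	 */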
	uobj = *e->e_sigobject;
	if (uobj == NULL) {
		mutex_enter(&sigobject_lock);
		if ((uobj = *e->e_sigobject) == NULL) {
			uobj = uao_create(sz, 0);
			(*uobj->pgops->pgo_reference)(uobj);
			va = vm_map_min(kernel_map);
			if ((error = uvm_map(kernel_map, &va, round_page(sz),
			    uobj, 0, 0,
			    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
			    UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
				printf("kernel mapping failed %d\n", error);
				(*uobj->pgops->pgo_detach)(uobj);
				mutex_exit(&sigobject_lock);
				return (error);
			}
			memcpy((void *)va, e->e_sigcode, sz);
#ifdef PMAP_NEED_PROCWR
			pmap_procwr(&proc0, va, sz);
#endif
			uvm_unmap(kernel_map, va, va + round_page(sz));
			*e->e_sigobject = uobj;
		}
		mutex_exit(&sigobject_lock);
	}

	/* Just a hint to uvm_map where to put it. */
	va = e->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr,
	    round_page(sz));

#ifdef __alpha__
	/*
	 * Tru64 puts /sbin/loader at the end of user virtual memory,
	 * which causes the above calculation to put the sigcode at
	 * an invalid address.  Put it just below the text instead.
	 */
	if (va == (vaddr_t)vm_map_max(&p->p_vmspace->vm_map)) {
		va = (vaddr_t)p->p_vmspace->vm_taddr - round_page(sz);
	}
#endif

	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz),
			uobj, 0, 0,
			UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
				    UVM_ADV_RANDOM, 0));
	if (error) {
		(*uobj->pgops->pgo_detach)(uobj);
		return (error);
	}
	p->p_sigctx.ps_sigcode = (void *)va;
	return (0);
}