exec_elf.c revision 1.15
      1 /*	$NetBSD: exec_elf.c,v 1.15 2010/03/19 22:08:13 christos Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Christos Zoulas.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1996 Christopher G. Demetriou
     34  * All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice, this list of conditions and the following disclaimer.
     41  * 2. Redistributions in binary form must reproduce the above copyright
     42  *    notice, this list of conditions and the following disclaimer in the
     43  *    documentation and/or other materials provided with the distribution.
     44  * 3. The name of the author may not be used to endorse or promote products
     45  *    derived from this software without specific prior written permission
     46  *
     47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     50  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     51  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     52  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     53  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     54  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     55  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     56  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     57  */
     58 
     59 #include <sys/cdefs.h>
     60 __KERNEL_RCSID(1, "$NetBSD: exec_elf.c,v 1.15 2010/03/19 22:08:13 christos Exp $");
     61 
     62 #ifdef _KERNEL_OPT
     63 #include "opt_pax.h"
     64 #endif /* _KERNEL_OPT */
     65 
     66 #include <sys/param.h>
     67 #include <sys/proc.h>
     68 #include <sys/malloc.h>
     69 #include <sys/kmem.h>
     70 #include <sys/namei.h>
     71 #include <sys/vnode.h>
     72 #include <sys/exec.h>
     73 #include <sys/exec_elf.h>
     74 #include <sys/syscall.h>
     75 #include <sys/signalvar.h>
     76 #include <sys/mount.h>
     77 #include <sys/stat.h>
     78 #include <sys/kauth.h>
     79 #include <sys/bitops.h>
     80 
     81 #include <sys/cpu.h>
     82 #include <machine/reg.h>
     83 
     84 #include <compat/common/compat_util.h>
     85 
     86 #include <sys/pax.h>
     87 
     88 extern struct emul emul_netbsd;
     89 
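         /*
          * Note (added for clarity): the ELFNAME*() macros from
          * <sys/exec_elf.h> rename these identifiers with an elf32/elf64
          * component according to ELFSIZE, since this file is compiled
          * once for each ELF word size the kernel supports.
          */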
     90 #define elf_check_header	ELFNAME(check_header)
     91 #define elf_copyargs		ELFNAME(copyargs)
     92 #define elf_load_file		ELFNAME(load_file)
     93 #define elf_load_psection	ELFNAME(load_psection)
     94 #define exec_elf_makecmds	ELFNAME2(exec,makecmds)
     95 #define netbsd_elf_signature	ELFNAME2(netbsd,signature)
     96 #define netbsd_elf_probe	ELFNAME2(netbsd,probe)
     97 #define	coredump		ELFNAMEEND(coredump)
     98 
     99 int	elf_load_file(struct lwp *, struct exec_package *, char *,
    100 	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
    101 void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
    102 	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);
    103 
    104 int	netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
    105 int	netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
    106 	    vaddr_t *);
    107 
    108 /* round up and down to page boundaries. */
    109 #define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
    110 #define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))
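         /*
          * For example, with b = 0x1000 (b must be a power of two):
          *	ELF_ROUND(0x12345, 0x1000) == 0x13000
          *	ELF_TRUNC(0x12345, 0x1000) == 0x12000
          */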
    111 
    112 /*
    113  * Arbitrary limits to avoid DoS for excessive memory allocation.
    114  */
    115 #define MAXPHNUM	128
    116 #define MAXSHNUM	32768
    117 #define MAXNOTESIZE	1024
    118 
    119 #ifdef PAX_ASLR
    120 /*
     121  * We don't move this to kern_pax.c because this file is compiled twice (ELF32/ELF64).
    122  */
    123 static void
    124 pax_aslr_elf(struct lwp *l, struct exec_package *epp, Elf_Ehdr *eh,
    125     Elf_Phdr *ph)
    126 {
    127 	size_t pax_offset, i;
    128 
    129 	if (pax_aslr_active(l)) {
    130 		size_t pax_align, l2, delta;
    131 		uint32_t r;
    132 
    133 		/*
     134 		 * Find the load alignment.  XXX: not all PT_LOAD segments
     135 		 * might have the same alignment.
    136 		 */
    137 		for (pax_align = i = 0; i < eh->e_phnum; i++)
    138 			if (ph[i].p_type == PT_LOAD) {
    139 				pax_align = ph[i].p_align;
    140 				break;
    141 			}
    142 
    143 		r = arc4random();
    144 
    145 		if (pax_align == 0)
     146 			pax_align = PAGE_SIZE;
    147 		l2 = ilog2(pax_align);
    148 		delta = PAX_ASLR_DELTA(r, l2, PAX_ASLR_DELTA_EXEC_LEN);
    149 #ifdef PAX_ASLR_DEBUG
     150 		uprintf("r=0x%x a=0x%zx p=0x%x Delta=0x%zx\n", r, l2, PGSHIFT,
    151 		    delta);
    152 #endif
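		/*
		 * The resulting offset is the random delta rounded down to
		 * the load alignment, plus one page so that even a zero
		 * delta moves the executable away from its linked address.
		 */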
    153 		pax_offset = ELF_TRUNC(delta, pax_align) + PAGE_SIZE;
    154 	} else
    155 		pax_offset = PAGE_SIZE;
    156 
    157 	for (i = 0; i < eh->e_phnum; i++)
    158 		ph[i].p_vaddr += pax_offset;
    159 	eh->e_entry += pax_offset;
    160 #ifdef PAX_ASLR_DEBUG
    161 	uprintf("pax offset=0x%zx entry=0x%llx\n",
    162 	    pax_offset, (unsigned long long)eh->e_entry);
    163 #endif
    164 }
    165 #else
    166 static void
    167 elf_placedynexec(Elf_Ehdr *eh, Elf_Phdr *ph)
    168 {
    169 	int i;
    170 
    171 	for (i = 0; i < eh->e_phnum; i++)
    172 		ph[i].p_vaddr += PAGE_SIZE;
    173 	eh->e_entry += PAGE_SIZE;
    174 }
    175 #endif /* PAX_ASLR */
    176 
    177 /*
    178  * Copy arguments onto the stack in the normal way, but add some
    179  * extra information in case of dynamic binding.
    180  */
    181 int
    182 elf_copyargs(struct lwp *l, struct exec_package *pack,
    183     struct ps_strings *arginfo, char **stackp, void *argp)
    184 {
    185 	size_t len, vlen;
    186 	AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname;
    187 	struct elf_args *ap;
    188 	int error;
    189 
    190 	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
    191 		return error;
    192 
    193 	a = ai;
    194 	execname = NULL;
    195 
    196 	/*
     197 	 * Push the extra arguments needed by dynamically linked
     198 	 * binaries onto the stack.
    199 	 */
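	/*
	 * The AT_* records below form the ELF auxiliary vector; it is
	 * copied out just past the argument and environment vectors and
	 * is consumed by the dynamic linker at startup.
	 */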
    200 	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
    201 		struct vattr *vap = pack->ep_vap;
    202 
    203 		a->a_type = AT_PHDR;
    204 		a->a_v = ap->arg_phaddr;
    205 		a++;
    206 
    207 		a->a_type = AT_PHENT;
    208 		a->a_v = ap->arg_phentsize;
    209 		a++;
    210 
    211 		a->a_type = AT_PHNUM;
    212 		a->a_v = ap->arg_phnum;
    213 		a++;
    214 
    215 		a->a_type = AT_PAGESZ;
    216 		a->a_v = PAGE_SIZE;
    217 		a++;
    218 
    219 		a->a_type = AT_BASE;
    220 		a->a_v = ap->arg_interp;
    221 		a++;
    222 
    223 		a->a_type = AT_FLAGS;
    224 		a->a_v = 0;
    225 		a++;
    226 
    227 		a->a_type = AT_ENTRY;
    228 		a->a_v = ap->arg_entry;
    229 		a++;
    230 
    231 		a->a_type = AT_EUID;
    232 		if (vap->va_mode & S_ISUID)
    233 			a->a_v = vap->va_uid;
    234 		else
    235 			a->a_v = kauth_cred_geteuid(l->l_cred);
    236 		a++;
    237 
    238 		a->a_type = AT_RUID;
    239 		a->a_v = kauth_cred_getuid(l->l_cred);
    240 		a++;
    241 
    242 		a->a_type = AT_EGID;
    243 		if (vap->va_mode & S_ISGID)
    244 			a->a_v = vap->va_gid;
    245 		else
    246 			a->a_v = kauth_cred_getegid(l->l_cred);
    247 		a++;
    248 
    249 		a->a_type = AT_RGID;
    250 		a->a_v = kauth_cred_getgid(l->l_cred);
    251 		a++;
    252 
    253 		if (pack->ep_path) {
    254 			execname = a;
    255 			a->a_type = AT_SUN_EXECNAME;
    256 			a++;
    257 		}
    258 
    259 		free(ap, M_TEMP);
    260 		pack->ep_emul_arg = NULL;
    261 	}
    262 
    263 	a->a_type = AT_NULL;
    264 	a->a_v = 0;
    265 	a++;
    266 
    267 	vlen = (a - ai) * sizeof(AuxInfo);
    268 
    269 	if (execname) {
    270 		char *path = pack->ep_path;
    271 		execname->a_v = (uintptr_t)(*stackp + vlen);
    272 		len = strlen(path) + 1;
    273 		if ((error = copyout(path, (*stackp + vlen), len)) != 0)
    274 			return error;
    275 		len = ALIGN(len);
    276 	} else
    277 		len = 0;
    278 
    279 	if ((error = copyout(ai, *stackp, vlen)) != 0)
    280 		return error;
    281 	*stackp += vlen + len;
    282 
    283 	return 0;
    284 }
    285 
    286 /*
    287  * elf_check_header():
    288  *
     289  * Check header for validity; return 0 if ok, ENOEXEC if error
    290  */
    291 int
    292 elf_check_header(Elf_Ehdr *eh, int type)
    293 {
    294 
    295 	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
    296 	    eh->e_ident[EI_CLASS] != ELFCLASS)
    297 		return ENOEXEC;
    298 
    299 	switch (eh->e_machine) {
    300 
    301 	ELFDEFNNAME(MACHDEP_ID_CASES)
    302 
    303 	default:
    304 		return ENOEXEC;
    305 	}
    306 
    307 	if (ELF_EHDR_FLAGS_OK(eh) == 0)
    308 		return ENOEXEC;
    309 
    310 	if (eh->e_type != type)
    311 		return ENOEXEC;
    312 
    313 	if (eh->e_shnum > MAXSHNUM || eh->e_phnum > MAXPHNUM)
    314 		return ENOEXEC;
    315 
    316 	return 0;
    317 }
    318 
    319 /*
    320  * elf_load_psection():
    321  *
    322  * Load a psection at the appropriate address
    323  */
    324 void
    325 elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    326     const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
    327 {
    328 	u_long msize, psize, rm, rf;
    329 	long diff, offset;
    330 
    331 	/*
     332 	 * If the caller specified an address, we load there; otherwise use p_vaddr.
    333 	 */
    334 	if (*addr == ELFDEFNNAME(NO_ADDR))
    335 		*addr = ph->p_vaddr;
    336 
    337 	if (ph->p_align > 1) {
    338 		/*
    339 		 * Make sure we are virtually aligned as we are supposed to be.
    340 		 */
    341 		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
    342 		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
    343 		/*
    344 		 * But make sure to not map any pages before the start of the
    345 		 * psection by limiting the difference to within a page.
    346 		 */
    347 		diff &= PAGE_MASK;
    348 	} else
    349 		diff = 0;
    350 
    351 	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
    352 	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
    353 	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;
    354 
    355 	/*
    356 	 * Adjust everything so it all starts on a page boundary.
    357 	 */
    358 	*addr -= diff;
    359 	offset = ph->p_offset - diff;
    360 	*size = ph->p_filesz + diff;
    361 	msize = ph->p_memsz + diff;
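	/*
	 * For example, with PAGE_SIZE = 0x1000, p_align = 0x1000,
	 * p_vaddr = 0x10234, p_offset = 0x1234 and p_filesz = 0x100:
	 * diff = 0x234, so the mapping starts at *addr - 0x234 with file
	 * offset 0x1000 and *size = 0x334.
	 */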
    362 
    363 	if (ph->p_align >= PAGE_SIZE) {
    364 		if ((ph->p_flags & PF_W) != 0) {
    365 			/*
    366 			 * Because the pagedvn pager can't handle zero fill
     367 			 * of the last data page if it's not page aligned, we
    368 			 * map the last page readvn.
    369 			 */
    370 			psize = trunc_page(*size);
    371 		} else {
    372 			psize = round_page(*size);
    373 		}
    374 	} else {
    375 		psize = *size;
    376 	}
    377 
    378 	if (psize > 0) {
    379 		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
    380 		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
    381 		    offset, *prot, flags);
    382 		flags &= VMCMD_RELATIVE;
    383 	}
    384 	if (psize < *size) {
    385 		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
    386 		    *addr + psize, vp, offset + psize, *prot, flags);
    387 	}
    388 
    389 	/*
     390 	 * Check if we need to extend the size of the segment: does the
     391 	 * bss extend past the next page boundary?
    392 	 */
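	/*
	 * For example, *addr = 0x21000, *size = 0x800, msize = 0x2800
	 * (PAGE_SIZE = 0x1000): rf = 0x22000 and rm = 0x24000, so 0x2000
	 * bytes of zero-fill are mapped at 0x22000 to cover the bss.
	 */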
    393 	rm = round_page(*addr + msize);
    394 	rf = round_page(*addr + *size);
    395 
    396 	if (rm != rf) {
    397 		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
    398 		    0, *prot, flags & VMCMD_RELATIVE);
    399 		*size = msize;
    400 	}
    401 }
    402 
    403 /*
    404  * elf_load_file():
    405  *
    406  * Load a file (interpreter/library) pointed to by path
    407  * [stolen from coff_load_shlib()]. Made slightly generic
    408  * so it might be used externally.
    409  */
    410 int
    411 elf_load_file(struct lwp *l, struct exec_package *epp, char *path,
    412     struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    413     Elf_Addr *last)
    414 {
    415 	int error, i;
    416 	struct vnode *vp;
    417 	struct vattr attr;
    418 	Elf_Ehdr eh;
    419 	Elf_Phdr *ph = NULL;
    420 	const Elf_Phdr *ph0;
    421 	const Elf_Phdr *base_ph;
    422 	const Elf_Phdr *last_ph;
    423 	u_long phsize;
    424 	Elf_Addr addr = *last;
    425 	struct proc *p;
    426 
    427 	p = l->l_proc;
    428 
    429 	/*
    430 	 * 1. open file
    431 	 * 2. read filehdr
    432 	 * 3. map text, data, and bss out of it using VM_*
    433 	 */
    434 	vp = epp->ep_interp;
    435 	if (vp == NULL) {
    436 		error = emul_find_interp(l, epp, path);
    437 		if (error != 0)
    438 			return error;
    439 		vp = epp->ep_interp;
    440 	}
    441 	/* We'll tidy this ourselves - otherwise we have locking issues */
    442 	epp->ep_interp = NULL;
    443 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    444 
    445 	/*
    446 	 * Similarly, if it's not marked as executable, or it's not a regular
    447 	 * file, we don't allow it to be used.
    448 	 */
    449 	if (vp->v_type != VREG) {
    450 		error = EACCES;
    451 		goto badunlock;
    452 	}
    453 	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
    454 		goto badunlock;
    455 
    456 	/* get attributes */
    457 	if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0)
    458 		goto badunlock;
    459 
    460 	/*
    461 	 * Check mount point.  Though we're not trying to exec this binary,
    462 	 * we will be executing code from it, so if the mount point
    463 	 * disallows execution or set-id-ness, we punt or kill the set-id.
    464 	 */
    465 	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
    466 		error = EACCES;
    467 		goto badunlock;
    468 	}
    469 	if (vp->v_mount->mnt_flag & MNT_NOSUID)
    470 		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);
    471 
    472 #ifdef notyet /* XXX cgd 960926 */
    473 	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
    474 #endif
    475 
    476 	error = vn_marktext(vp);
    477 	if (error)
    478 		goto badunlock;
    479 
    480 	VOP_UNLOCK(vp, 0);
    481 
    482 	if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
    483 		goto bad;
    484 
    485 	if ((error = elf_check_header(&eh, ET_DYN)) != 0)
    486 		goto bad;
    487 
    488 	if (eh.e_phnum > MAXPHNUM || eh.e_phnum == 0) {
    489 		error = ENOEXEC;
    490 		goto bad;
    491 	}
    492 
    493 	phsize = eh.e_phnum * sizeof(Elf_Phdr);
    494 	ph = kmem_alloc(phsize, KM_SLEEP);
    495 
    496 	if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
    497 		goto bad;
    498 
    499 #ifdef ELF_INTERP_NON_RELOCATABLE
    500 	/*
    501 	 * Evil hack:  Only MIPS should be non-relocatable, and the
    502 	 * psections should have a high address (typically 0x5ffe0000).
    503 	 * If it's now relocatable, it should be linked at 0 and the
    504 	 * psections should have zeros in the upper part of the address.
    505 	 * Otherwise, force the load at the linked address.
    506 	 */
    507 	if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
    508 		*last = ELFDEFNNAME(NO_ADDR);
    509 #endif
    510 
    511 	/*
    512 	 * If no position to load the interpreter was set by a probe
    513 	 * function, pick the same address that a non-fixed mmap(0, ..)
    514 	 * would (i.e. something safely out of the way).
    515 	 */
    516 	if (*last == ELFDEFNNAME(NO_ADDR)) {
    517 		u_long limit = 0;
    518 		/*
    519 		 * Find the start and ending addresses of the psections to
    520 		 * be loaded.  This will give us the size.
    521 		 */
    522 		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
    523 		     i++, ph0++) {
    524 			if (ph0->p_type == PT_LOAD) {
    525 				u_long psize = ph0->p_vaddr + ph0->p_memsz;
    526 				if (base_ph == NULL)
    527 					base_ph = ph0;
    528 				if (psize > limit)
    529 					limit = psize;
    530 			}
    531 		}
    532 
    533 		if (base_ph == NULL) {
    534 			error = ENOEXEC;
    535 			goto bad;
    536 		}
    537 
    538 		/*
    539 		 * Now compute the size and load address.
    540 		 */
    541 		addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
    542 		    epp->ep_daddr,
    543 		    round_page(limit) - trunc_page(base_ph->p_vaddr));
    544 	} else
    545 		addr = *last; /* may be ELF_LINK_ADDR */
    546 
    547 	/*
    548 	 * Load all the necessary sections
    549 	 */
    550 	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
    551 	     i < eh.e_phnum; i++, ph0++) {
    552 		switch (ph0->p_type) {
    553 		case PT_LOAD: {
    554 			u_long size;
    555 			int prot = 0;
    556 			int flags;
    557 
    558 			if (base_ph == NULL) {
    559 				/*
    560 				 * First encountered psection is always the
    561 				 * base psection.  Make sure it's aligned
    562 				 * properly (align down for topdown and align
    563 				 * upwards for not topdown).
    564 				 */
    565 				base_ph = ph0;
    566 				flags = VMCMD_BASE;
    567 				if (addr == ELF_LINK_ADDR)
    568 					addr = ph0->p_vaddr;
    569 				if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
    570 					addr = ELF_TRUNC(addr, ph0->p_align);
    571 				else
    572 					addr = ELF_ROUND(addr, ph0->p_align);
    573 			} else {
    574 				u_long limit = round_page(last_ph->p_vaddr
    575 				    + last_ph->p_memsz);
    576 				u_long base = trunc_page(ph0->p_vaddr);
    577 
    578 				/*
    579 				 * If there is a gap in between the psections,
    580 				 * map it as inaccessible so nothing else
    581 				 * mmap'ed will be placed there.
    582 				 */
    583 				if (limit != base) {
    584 					NEW_VMCMD2(vcset, vmcmd_map_zero,
    585 					    base - limit,
    586 					    limit - base_ph->p_vaddr, NULLVP,
    587 					    0, VM_PROT_NONE, VMCMD_RELATIVE);
    588 				}
    589 
    590 				addr = ph0->p_vaddr - base_ph->p_vaddr;
    591 				flags = VMCMD_RELATIVE;
    592 			}
    593 			last_ph = ph0;
    594 			elf_load_psection(vcset, vp, &ph[i], &addr,
    595 			    &size, &prot, flags);
    596 			/*
    597 			 * If entry is within this psection then this
    598 			 * must contain the .text section.  *entryoff is
    599 			 * relative to the base psection.
    600 			 */
    601 			if (eh.e_entry >= ph0->p_vaddr &&
    602 			    eh.e_entry < (ph0->p_vaddr + size)) {
    603 				*entryoff = eh.e_entry - base_ph->p_vaddr;
    604 			}
    605 			addr += size;
    606 			break;
    607 		}
    608 
    609 		case PT_DYNAMIC:
    610 		case PT_PHDR:
    611 			break;
    612 
    613 		case PT_NOTE:
    614 			break;
    615 
    616 		default:
    617 			break;
    618 		}
    619 	}
    620 
    621 	kmem_free(ph, phsize);
    622 	/*
    623 	 * This value is ignored if TOPDOWN.
    624 	 */
    625 	*last = addr;
    626 	vrele(vp);
    627 	return 0;
    628 
    629 badunlock:
    630 	VOP_UNLOCK(vp, 0);
    631 
    632 bad:
    633 	if (ph != NULL)
    634 		kmem_free(ph, phsize);
    635 #ifdef notyet /* XXX cgd 960926 */
    636 	(maybe) VOP_CLOSE it
    637 #endif
    638 	vrele(vp);
    639 	return error;
    640 }
    641 
    642 /*
    643  * exec_elf_makecmds(): Prepare an Elf binary's exec package
    644  *
     645  * First, set up the various offsets/lengths in the exec package.
    646  *
    647  * Then, mark the text image busy (so it can be demand paged) or error
    648  * out if this is not possible.  Finally, set up vmcmds for the
    649  * text, data, bss, and stack segments.
    650  */
    651 int
    652 exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
    653 {
    654 	Elf_Ehdr *eh = epp->ep_hdr;
    655 	Elf_Phdr *ph, *pp;
    656 	Elf_Addr phdr = 0, pos = 0;
    657 	int error, i, nload;
    658 	char *interp = NULL;
    659 	u_long phsize;
    660 	struct proc *p;
    661 	bool is_dyn;
    662 
    663 	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
    664 		return ENOEXEC;
    665 
    666 	is_dyn = elf_check_header(eh, ET_DYN) == 0;
    667 	/*
    668 	 * XXX allow for executing shared objects. It seems silly
    669 	 * but other ELF-based systems allow it as well.
    670 	 */
    671 	if (elf_check_header(eh, ET_EXEC) != 0 && !is_dyn)
    672 		return ENOEXEC;
    673 
    674 	if (eh->e_phnum > MAXPHNUM || eh->e_phnum == 0)
    675 		return ENOEXEC;
    676 
    677 	error = vn_marktext(epp->ep_vp);
    678 	if (error)
    679 		return error;
    680 
    681 	/*
    682 	 * Allocate space to hold all the program headers, and read them
    683 	 * from the file
    684 	 */
    685 	p = l->l_proc;
    686 	phsize = eh->e_phnum * sizeof(Elf_Phdr);
    687 	ph = kmem_alloc(phsize, KM_SLEEP);
    688 
    689 	if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
    690 	    0)
    691 		goto bad;
    692 
    693 	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
    694 	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);
    695 
    696 	for (i = 0; i < eh->e_phnum; i++) {
    697 		pp = &ph[i];
    698 		if (pp->p_type == PT_INTERP) {
    699 			if (pp->p_filesz >= MAXPATHLEN) {
    700 				error = ENOEXEC;
    701 				goto bad;
    702 			}
    703 			interp = PNBUF_GET();
    704 			interp[0] = '\0';
    705 			if ((error = exec_read_from(l, epp->ep_vp,
    706 			    pp->p_offset, interp, pp->p_filesz)) != 0)
    707 				goto bad;
    708 			break;
    709 		}
    710 	}
    711 
    712 	/*
    713 	 * On the same architecture, we may be emulating different systems.
    714 	 * See which one will accept this executable.
    715 	 *
    716 	 * Probe functions would normally see if the interpreter (if any)
    717 	 * exists. Emulation packages may possibly replace the interpreter in
    718 	 * interp[] with a changed path (/emul/xxx/<path>).
    719 	 */
    720 	pos = ELFDEFNNAME(NO_ADDR);
    721 	if (epp->ep_esch->u.elf_probe_func) {
    722 		vaddr_t startp = (vaddr_t)pos;
    723 
    724 		error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
    725 							  &startp);
    726 		if (error)
    727 			goto bad;
    728 		pos = (Elf_Addr)startp;
    729 	}
    730 
    731 #if defined(PAX_MPROTECT) || defined(PAX_SEGVGUARD) || defined(PAX_ASLR)
    732 	p->p_pax = epp->ep_pax_flags;
    733 #endif /* PAX_MPROTECT || PAX_SEGVGUARD || PAX_ASLR */
    734 
    735 	if (is_dyn)
    736 #ifdef PAX_ASLR
    737 		pax_aslr_elf(l, epp, eh, ph);
    738 #else
    739 		elf_placedynexec(eh, ph);
    740 #endif /* PAX_ASLR */
    741 
    742 	/*
    743 	 * Load all the necessary sections
    744 	 */
    745 	for (i = nload = 0; i < eh->e_phnum; i++) {
    746 		Elf_Addr  addr = ELFDEFNNAME(NO_ADDR);
    747 		u_long size = 0;
    748 		int prot = 0;
    749 
    750 		pp = &ph[i];
    751 
    752 		switch (ph[i].p_type) {
    753 		case PT_LOAD:
    754 			/*
    755 			 * XXX
    756 			 * Can handle only 2 sections: text and data
    757 			 */
    758 			if (nload++ == 2) {
    759 				error = ENOEXEC;
    760 				goto bad;
    761 			}
    762 			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
    763 			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);
    764 
    765 			/*
    766 			 * Decide whether it's text or data by looking
    767 			 * at the entry point.
    768 			 */
    769 			if (eh->e_entry >= addr &&
    770 			    eh->e_entry < (addr + size)) {
    771 				epp->ep_taddr = addr;
    772 				epp->ep_tsize = size;
    773 				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
    774 					epp->ep_daddr = addr;
    775 					epp->ep_dsize = size;
    776 				}
    777 			} else {
    778 				epp->ep_daddr = addr;
    779 				epp->ep_dsize = size;
    780 			}
    781 			break;
    782 
    783 		case PT_SHLIB:
    784 			/* SCO has these sections. */
    785 		case PT_INTERP:
    786 			/* Already did this one. */
    787 		case PT_DYNAMIC:
    788 			break;
    789 		case PT_NOTE:
    790 			break;
    791 		case PT_PHDR:
    792 			/* Note address of program headers (in text segment) */
    793 			phdr = pp->p_vaddr;
    794 			break;
    795 
    796 		default:
    797 			/*
    798 			 * Not fatal; we don't need to understand everything.
    799 			 */
    800 			break;
    801 		}
    802 	}
    803 
    804 	/*
    805 	 * Check if we found a dynamically linked binary and arrange to load
    806 	 * its interpreter
    807 	 */
    808 	if (interp) {
    809 		struct elf_args *ap;
    810 		int j = epp->ep_vmcmds.evs_used;
    811 		u_long interp_offset;
    812 
    813 		ap = (struct elf_args *)malloc(sizeof(struct elf_args),
    814 		    M_TEMP, M_WAITOK);
    815 		if ((error = elf_load_file(l, epp, interp,
    816 		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
    817 			free(ap, M_TEMP);
    818 			goto bad;
    819 		}
    820 		ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr;
    821 		epp->ep_entry = ap->arg_interp + interp_offset;
    822 		ap->arg_phaddr = phdr;
    823 
    824 		ap->arg_phentsize = eh->e_phentsize;
    825 		ap->arg_phnum = eh->e_phnum;
    826 		ap->arg_entry = eh->e_entry;
    827 
    828 		epp->ep_emul_arg = ap;
    829 
    830 		PNBUF_PUT(interp);
    831 	} else
    832 		epp->ep_entry = eh->e_entry;
    833 
    834 #ifdef ELF_MAP_PAGE_ZERO
    835 	/* Dell SVR4 maps page zero, yeuch! */
    836 	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
    837 	    epp->ep_vp, 0, VM_PROT_READ);
    838 #endif
    839 	kmem_free(ph, phsize);
    840 	return (*epp->ep_esch->es_setup_stack)(l, epp);
    841 
    842 bad:
    843 	if (interp)
    844 		PNBUF_PUT(interp);
    845 	kmem_free(ph, phsize);
    846 	kill_vmcmds(&epp->ep_vmcmds);
    847 	return error;
    848 }
    849 
    850 int
    851 netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
    852     Elf_Ehdr *eh)
    853 {
    854 	size_t i;
    855 	Elf_Shdr *sh;
    856 	Elf_Nhdr *np;
    857 	size_t shsize;
    858 	int error;
    859 	int isnetbsd = 0;
    860 	char *ndata;
    861 
    862 	epp->ep_pax_flags = 0;
    863 	if (eh->e_shnum > MAXSHNUM || eh->e_shnum == 0)
    864 		return ENOEXEC;
    865 
    866 	shsize = eh->e_shnum * sizeof(Elf_Shdr);
    867 	sh = kmem_alloc(shsize, KM_SLEEP);
    868 	error = exec_read_from(l, epp->ep_vp, eh->e_shoff, sh, shsize);
    869 	if (error)
    870 		goto out;
    871 
    872 	np = kmem_alloc(MAXNOTESIZE, KM_SLEEP);
    873 	for (i = 0; i < eh->e_shnum; i++) {
    874 		Elf_Shdr *shp = &sh[i];
    875 
    876 		if (shp->sh_type != SHT_NOTE ||
    877 		    shp->sh_size > MAXNOTESIZE ||
    878 		    shp->sh_size < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
    879 			continue;
    880 
    881 		error = exec_read_from(l, epp->ep_vp, shp->sh_offset, np,
    882 		    shp->sh_size);
    883 		if (error)
    884 			continue;
    885 
    886 		ndata = (char *)(np + 1);
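		/*
		 * The section body is an Elf_Nhdr followed by the note name
		 * and its descriptor; ndata points just past the header.
		 * The name is matched against ELF_NOTE_NETBSD_NAME or
		 * ELF_NOTE_PAX_NAME, and for the PaX note the descriptor is
		 * the flags word copied into ep_pax_flags below.
		 */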
    887 		switch (np->n_type) {
    888 		case ELF_NOTE_TYPE_NETBSD_TAG:
    889 			if (np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
    890 			    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
    891 			    memcmp(ndata, ELF_NOTE_NETBSD_NAME,
    892 			    ELF_NOTE_NETBSD_NAMESZ))
    893 				goto bad;
    894 			isnetbsd = 1;
    895 			break;
    896 
    897 		case ELF_NOTE_TYPE_PAX_TAG:
    898 			if (np->n_namesz != ELF_NOTE_PAX_NAMESZ ||
    899 			    np->n_descsz != ELF_NOTE_PAX_DESCSZ ||
    900 			    memcmp(ndata, ELF_NOTE_PAX_NAME,
    901 			    ELF_NOTE_PAX_NAMESZ)) {
    902 bad:
    903 #ifdef DIAGNOSTIC
    904 				printf("%s: bad tag %d: "
    905 				    "[%d %d, %d %d, %*.*s %*.*s]\n",
    906 				    epp->ep_name,
     907 				    np->n_type,
    908 				    np->n_namesz, ELF_NOTE_PAX_NAMESZ,
    909 				    np->n_descsz, ELF_NOTE_PAX_DESCSZ,
    910 				    ELF_NOTE_PAX_NAMESZ,
    911 				    ELF_NOTE_PAX_NAMESZ,
    912 				    ndata,
    913 				    ELF_NOTE_PAX_NAMESZ,
    914 				    ELF_NOTE_PAX_NAMESZ,
    915 				    ELF_NOTE_PAX_NAME);
    916 #endif
    917 				continue;
    918 			}
    919 			(void)memcpy(&epp->ep_pax_flags,
    920 			    ndata + ELF_NOTE_PAX_NAMESZ,
    921 			    sizeof(epp->ep_pax_flags));
    922 			break;
    923 
    924 		default:
    925 #ifdef DIAGNOSTIC
    926 			printf("%s: unknown note type %d\n", epp->ep_name,
    927 			    np->n_type);
    928 #endif
    929 			break;
    930 		}
    931 	}
    932 	kmem_free(np, MAXNOTESIZE);
    933 
    934 	error = isnetbsd ? 0 : ENOEXEC;
    935 out:
    936 	kmem_free(sh, shsize);
    937 	return error;
    938 }
    939 
    940 int
    941 netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp,
    942     vaddr_t *pos)
    943 {
    944 	int error;
    945 
    946 	if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
    947 		return error;
    948 #ifdef ELF_MD_PROBE_FUNC
    949 	if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0)
    950 		return error;
    951 #elif defined(ELF_INTERP_NON_RELOCATABLE)
    952 	*pos = ELF_LINK_ADDR;
    953 #endif
    954 	return 0;
    955 }
    956