/*	$NetBSD: exec_elf32.c,v 1.123 2007/04/22 08:30:00 dsl Exp $	*/

/*-
 * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_elf32.c,v 1.123 2007/04/22 08:30:00 dsl Exp $");

/* If not included by exec_elf64.c, ELFSIZE won't be defined. */
#ifndef ELFSIZE
#define	ELFSIZE		32
#endif

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <machine/cpu.h>
#include <machine/reg.h>

#include <compat/common/compat_util.h>

#if defined(PAX_MPROTECT) || defined(PAX_SEGVGUARD)
#include <sys/pax.h>
#endif /* PAX_MPROTECT || PAX_SEGVGUARD */

extern const struct emul emul_netbsd;

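/*
 * ELFNAME()/ELFNAME2() expand to word-size specific identifiers
 * (elf32_* or elf64_*), so the functions below get distinct names
 * when this file is compiled for ELFSIZE 32 and again, via
 * exec_elf64.c, for ELFSIZE 64.
 */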
#define elf_check_header	ELFNAME(check_header)
#define elf_copyargs		ELFNAME(copyargs)
#define elf_load_file		ELFNAME(load_file)
#define elf_load_psection	ELFNAME(load_psection)
#define exec_elf_makecmds	ELFNAME2(exec,makecmds)
#define netbsd_elf_signature	ELFNAME2(netbsd,signature)
#define netbsd_elf_probe	ELFNAME2(netbsd,probe)

int	elf_load_file(struct lwp *, struct exec_package *, char *,
	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);

int	netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
int	netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
	    vaddr_t *);

/* round up and down to the given boundary (which must be a power of two). */
#define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))
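/*
 * For example, with a 4 KB boundary:
 *	ELF_ROUND(0x1234, 0x1000) == 0x2000
 *	ELF_TRUNC(0x1234, 0x1000) == 0x1000
 */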

/*
 * Arbitrary upper bound on the number of program headers we are willing
 * to process; it keeps the malloc'ed header tables below a sane size.
 */
#define MAXPHNUM	50

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
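/*
 * The extra information is the ELF auxiliary vector: an array of
 * (a_type, a_v) pairs (AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_BASE,
 * AT_FLAGS, AT_ENTRY and the uid/gid entries) terminated by AT_NULL.
 * It is copied out at *stackp after copyargs() has laid out the argument
 * and environment data, and *stackp is advanced past it.
 */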
int
elf_copyargs(struct lwp *l, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;
	int error;

	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

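		/*
		 * Report the effective and real IDs the process will have
		 * once any set-id bits on the image take effect.
		 */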
		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = kauth_cred_geteuid(l->l_cred);
		a++;

		a->a_type = AT_RUID;
		a->a_v = kauth_cred_getuid(l->l_cred);
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = kauth_cred_getegid(l->l_cred);
		a++;

		a->a_type = AT_RGID;
		a->a_v = kauth_cred_getgid(l->l_cred);
		a++;

		free(ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	len = (a - ai) * sizeof(AuxInfo);
	if ((error = copyout(ai, *stackp, len)) != 0)
		return error;
	*stackp += len;

	return 0;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC on error
 */
int
elf_check_header(Elf_Ehdr *eh, int type)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS)
		return ENOEXEC;

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		return ENOEXEC;
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0)
		return ENOEXEC;

	if (eh->e_type != type)
		return ENOEXEC;

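	/* Reject headers with absurd section or program header counts. */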
	if (eh->e_shnum > 32768 || eh->e_phnum > 128)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned we
			 * map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

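	/*
	 * Issue the main mapping first: it is demand paged from the vnode
	 * when the psection is at least page aligned, and mapped readvn
	 * otherwise.  Any remaining tail gets its own readvn command.
	 * Only the first command may carry VMCMD_BASE; later ones keep at
	 * most VMCMD_RELATIVE.
	 */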
	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, *prot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does the
	 * bss extend past the next page boundary)?
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
int
elf_load_file(struct lwp *l, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    Elf_Addr *last)
{
	int error, i;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *ph0;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;
	struct proc *p;

	p = l->l_proc;

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	vp = epp->ep_interp;
	if (vp == NULL) {
		error = emul_find_interp(l, epp, path);
		if (error != 0)
			return error;
		vp = epp->ep_interp;
	}
	/* We'll tidy this ourselves - otherwise we have locking issues */
	epp->ep_interp = NULL;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Similarly, if it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred, l)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, l->l_cred, l)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

#ifdef notyet /* XXX cgd 960926 */
	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
#endif

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp, 0);

	if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, ET_DYN)) != 0)
		goto bad;

	if (eh.e_phnum > MAXPHNUM) {
		error = ENOEXEC;
		goto bad;
	}

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

#ifdef ELF_INTERP_NON_RELOCATABLE
	/*
	 * Evil hack:  Only MIPS should be non-relocatable, and the
	 * psections should have a high address (typically 0x5ffe0000).
	 * If it's now relocatable, it should be linked at 0 and the
	 * psections should have zeros in the upper part of the address.
	 * Otherwise, force the load at the linked address.
	 */
	if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
		*last = ELFDEFNNAME(NO_ADDR);
#endif

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
		     i++, ph0++) {
			if (ph0->p_type == PT_LOAD) {
				u_long psize = ph0->p_vaddr + ph0->p_memsz;
				if (base_ph == NULL)
					base_ph = ph0;
				if (psize > limit)
					limit = psize;
			}
		}

		if (base_ph == NULL) {
			error = ENOEXEC;
			goto bad;
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
		    epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr));
	} else
		addr = *last; /* may be ELF_LINK_ADDR */

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
	     i < eh.e_phnum; i++, ph0++) {
		switch (ph0->p_type) {
		case PT_LOAD: {
			u_long size;
			int prot = 0;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly (align down for topdown and align
				 * upwards for not topdown).
				 */
				base_ph = ph0;
				flags = VMCMD_BASE;
				if (addr == ELF_LINK_ADDR)
					addr = ph0->p_vaddr;
				if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
					addr = ELF_TRUNC(addr, ph0->p_align);
				else
					addr = ELF_ROUND(addr, ph0->p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph0->p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

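				/*
				 * Subsequent psections are mapped at offsets
				 * relative to the base psection, so the whole
				 * object can be placed wherever the base
				 * mapping ends up.
				 */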
				addr = ph0->p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = ph0;
			elf_load_psection(vcset, vp, &ph[i], &addr,
			    &size, &prot, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph0->p_vaddr &&
			    eh.e_entry < (ph0->p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		case PT_DYNAMIC:
		case PT_PHDR:
			break;

		case PT_NOTE:
			break;

		default:
			break;
		}
	}

	free(ph, M_TEMP);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp, 0);

bad:
	if (ph != NULL)
		free(ph, M_TEMP);
#ifdef notyet /* XXX cgd 960926 */
	(maybe) VOP_CLOSE it
#endif
	vrele(vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
int
exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, pos = 0;
	int error, i, nload;
	char *interp = NULL;
	u_long phsize;
	struct proc *p;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return ENOEXEC;

	/*
	 * XXX allow for executing shared objects. It seems silly
	 * but other ELF-based systems allow it as well.
	 */
	if (elf_check_header(eh, ET_EXEC) != 0 &&
	    elf_check_header(eh, ET_DYN) != 0)
		return ENOEXEC;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	error = vn_marktext(epp->ep_vp);
	if (error)
		return error;

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	p = l->l_proc;
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

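	/*
	 * Look for a PT_INTERP header; if present it names the dynamic
	 * linker, and its path is read into interp[] for the probe
	 * function and elf_load_file() below.
	 */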
	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= MAXPATHLEN)
				goto bad;
			interp = PNBUF_GET();
			interp[0] = '\0';
			if ((error = exec_read_from(l, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>).
	 */
	pos = ELFDEFNNAME(NO_ADDR);
	if (epp->ep_esch->u.elf_probe_func) {
		vaddr_t startp = (vaddr_t)pos;

		error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
							  &startp);
		if (error)
			goto bad;
		pos = (Elf_Addr)startp;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		Elf_Addr  addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);

			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr &&
			    eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				}
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
			break;
		case PT_NOTE:
#if defined(PAX_MPROTECT) || defined(PAX_SEGVGUARD)
			pax_adjust(l, ph[i].p_flags);
#endif /* PAX_MPROTECT || PAX_SEGVGUARD */
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp) {
		struct elf_args *ap;
		int j = epp->ep_vmcmds.evs_used;
		u_long interp_offset;

		MALLOC(ap, struct elf_args *, sizeof(struct elf_args),
		    M_TEMP, M_WAITOK);
		if ((error = elf_load_file(l, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
			FREE(ap, M_TEMP);
			goto bad;
		}
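		/*
		 * The first vmcmd added by elf_load_file() (index j) is the
		 * interpreter's base mapping; its address becomes AT_BASE
		 * and the entry point lies interp_offset beyond it.
		 */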
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr;
		epp->ep_entry = ap->arg_interp + interp_offset;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;

		PNBUF_PUT(interp);
	} else
		epp->ep_entry = eh->e_entry;

#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif
	free(ph, M_TEMP);
	return (*epp->ep_esch->es_setup_stack)(l, epp);

bad:
	if (interp)
		PNBUF_PUT(interp);
	free(ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}

int
netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Phdr *ph;
	size_t phsize;
	int error;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);
	error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize);
	if (error)
		goto out;

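	/*
	 * Scan the program headers for a PT_NOTE segment carrying the
	 * "NetBSD" tag note; finding one marks the binary as native.
	 */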
	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Phdr *ephp = &ph[i];
		Elf_Nhdr *np;

		if (ephp->p_type != PT_NOTE ||
		    ephp->p_filesz > 1024 ||
		    ephp->p_filesz < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		np = (Elf_Nhdr *)malloc(ephp->p_filesz, M_TEMP, M_WAITOK);
		error = exec_read_from(l, epp->ep_vp, ephp->p_offset, np,
		    ephp->p_filesz);
		if (error)
			goto next;

		if (np->n_type != ELF_NOTE_TYPE_NETBSD_TAG ||
		    np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
		    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
		    memcmp(np + 1, ELF_NOTE_NETBSD_NAME,
		    ELF_NOTE_NETBSD_NAMESZ))
			goto next;

		error = 0;
		free(np, M_TEMP);
		goto out;

	next:
		free(np, M_TEMP);
		continue;
	}

	error = ENOEXEC;
out:
	free(ph, M_TEMP);
	return error;
}

int
netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp,
    vaddr_t *pos)
{
	int error;

	if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
		return error;
#ifdef ELF_INTERP_NON_RELOCATABLE
	*pos = ELF_LINK_ADDR;
#endif
	return 0;
8331.1Sfvdl}
834