/*	$NetBSD: exec_elf32.c,v 1.114 2006/05/18 17:35:49 elad Exp $	*/

/*-
 * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_elf32.c,v 1.114 2006/05/18 17:35:49 elad Exp $");

/* If not included by exec_elf64.c, ELFSIZE won't be defined. */
#ifndef ELFSIZE
#define	ELFSIZE		32
#endif

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <machine/cpu.h>
#include <machine/reg.h>

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

extern const struct emul emul_netbsd;

#define elf_check_header	ELFNAME(check_header)
#define elf_copyargs		ELFNAME(copyargs)
#define elf_load_file		ELFNAME(load_file)
#define elf_load_psection	ELFNAME(load_psection)
#define exec_elf_makecmds	ELFNAME2(exec,makecmds)
#define netbsd_elf_signature	ELFNAME2(netbsd,signature)
#define netbsd_elf_probe	ELFNAME2(netbsd,probe)

int	elf_load_file(struct lwp *, struct exec_package *, char *,
	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);

int	netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
int	netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
	    vaddr_t *);

/* round up and down to page boundaries. */
#define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))
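/*
 * For example, with a 4 KB (0x1000) page or alignment value:
 * ELF_ROUND(0x1234, 0x1000) == 0x2000 and ELF_TRUNC(0x1234, 0x1000) == 0x1000.
 * Both assume "b" is a power of two.
 */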

#define MAXPHNUM	50

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
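/*
 * For a dynamically linked binary the following AT_* entries are appended
 * after the argument and environment vectors: AT_PHDR, AT_PHENT, AT_PHNUM,
 * AT_PAGESZ, AT_BASE, AT_FLAGS, AT_ENTRY, AT_EUID, AT_RUID, AT_EGID and
 * AT_RGID, terminated by AT_NULL.  ELF_AUX_ENTRIES must be large enough to
 * hold all of them.
 */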
int
elf_copyargs(struct lwp *l, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;
	struct proc *p;
	int error;

	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;
	p = l->l_proc;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = kauth_cred_geteuid(p->p_cred);
		a++;

		a->a_type = AT_RUID;
		a->a_v = kauth_cred_getuid(p->p_cred);
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = kauth_cred_getegid(p->p_cred);
		a++;

		a->a_type = AT_RGID;
		a->a_v = kauth_cred_getgid(p->p_cred);
		a++;

		free(ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	len = (a - ai) * sizeof(AuxInfo);
	if ((error = copyout(ai, *stackp, len)) != 0)
		return error;
	*stackp += len;

	return 0;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC on error
 */
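/*
 * The checks below cover the ELF magic and class bytes, the machine type
 * (via the MACHDEP_ID_CASES list), the machine-dependent header flags,
 * the requested object type (ET_EXEC or ET_DYN), and rough sanity bounds
 * on e_shnum and e_phnum.
 */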
int
elf_check_header(Elf_Ehdr *eh, int type)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS)
		return ENOEXEC;

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		return ENOEXEC;
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0)
		return ENOEXEC;

	if (eh->e_type != type)
		return ENOEXEC;

	if (eh->e_shnum > 32768 || eh->e_phnum > 128)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
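/*
 * Worked example (assuming 4 KB pages): a psection with p_vaddr 0x10345,
 * p_offset 0x1345 and p_align 0x1000 gives diff = 0x345, so the mapping
 * starts at address 0x10000 with file offset 0x1000, and *size/msize grow
 * by the same 0x345 so the psection data lands at its linked address.
 */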
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned we
			 * map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, *prot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does
	 * bss extend past the next page boundary)?
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
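/*
 * On entry *last holds the address hint chosen by the probe function
 * (or ELFDEFNNAME(NO_ADDR) / ELF_LINK_ADDR); on success it is updated to
 * reflect the end of the area just mapped.  *entryoff receives the entry
 * point of the loaded object relative to its base psection.
 */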
int
elf_load_file(struct lwp *l, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    Elf_Addr *last)
{
	int error, i;
	struct nameidata nd;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *ph0;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;
	struct proc *p;

	p = l->l_proc;

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, l);
	if ((error = namei(&nd)) != 0)
		return error;
	vp = nd.ni_vp;

	/*
	 * Similarly, if it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_proc->p_cred, l)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, l->l_proc->p_cred, l)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

#ifdef notyet /* XXX cgd 960926 */
	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
#endif

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp, 0);

	if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, ET_DYN)) != 0)
		goto bad;

	if (eh.e_phnum > MAXPHNUM) {
		/* Set an error; otherwise "goto bad" would return 0. */
		error = ENOEXEC;
		goto bad;
	}

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

#ifdef ELF_INTERP_NON_RELOCATABLE
	/*
	 * Evil hack:  Only MIPS should be non-relocatable, and the
	 * psections should have a high address (typically 0x5ffe0000).
	 * If it's now relocatable, it should be linked at 0 and the
	 * psections should have zeros in the upper part of the address.
	 * Otherwise, force the load at the linked address.
	 */
	if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
		*last = ELFDEFNNAME(NO_ADDR);
#endif

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
		     i++, ph0++) {
			if (ph0->p_type == PT_LOAD) {
				u_long psize = ph0->p_vaddr + ph0->p_memsz;
				if (base_ph == NULL)
					base_ph = ph0;
				if (psize > limit)
					limit = psize;
			}
		}

		if (base_ph == NULL) {
			error = ENOEXEC;
			goto bad;
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
		    epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr));
	} else
		addr = *last; /* may be ELF_LINK_ADDR */

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
	     i < eh.e_phnum; i++, ph0++) {
		switch (ph0->p_type) {
		case PT_LOAD: {
			u_long size;
			int prot = 0;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly (align down for topdown and align
				 * upwards for not topdown).
				 */
				base_ph = ph0;
				flags = VMCMD_BASE;
				if (addr == ELF_LINK_ADDR)
					addr = ph0->p_vaddr;
				if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
					addr = ELF_TRUNC(addr, ph0->p_align);
				else
					addr = ELF_ROUND(addr, ph0->p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph0->p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

				addr = ph0->p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = ph0;
			elf_load_psection(vcset, vp, &ph[i], &addr,
			    &size, &prot, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph0->p_vaddr &&
			    eh.e_entry < (ph0->p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		case PT_DYNAMIC:
		case PT_PHDR:
			break;

		case PT_NOTE:
#ifdef PAX_MPROTECT
			pax_mprotect_adjust(l, ph[i].p_flags);
			break;
#endif /* PAX_MPROTECT */

		default:
			break;
		}
	}

	free(ph, M_TEMP);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp, 0);

bad:
	if (ph != NULL)
		free(ph, M_TEMP);
#ifdef notyet /* XXX cgd 960926 */
	(maybe) VOP_CLOSE it
#endif
	vrele(vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
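/*
 * In outline: validate the ELF header, read the program headers, remember
 * any PT_INTERP path, let the emulation-specific probe function veto or
 * relocate the image, build vmcmds for each PT_LOAD psection, and, for
 * dynamically linked binaries, load the interpreter and record the values
 * later exported through the aux vector by elf_copyargs().
 */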
int
exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, pos = 0;
	int error, i, nload;
	char *interp = NULL;
	u_long phsize;
	struct proc *p;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return ENOEXEC;

	/*
	 * XXX allow for executing shared objects. It seems silly
	 * but other ELF-based systems allow it as well.
	 */
	if (elf_check_header(eh, ET_EXEC) != 0 &&
	    elf_check_header(eh, ET_DYN) != 0)
		return ENOEXEC;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	error = vn_marktext(epp->ep_vp);
	if (error)
		return error;

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	p = l->l_proc;
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= MAXPATHLEN)
				goto bad;
			interp = PNBUF_GET();
			interp[0] = '\0';
			if ((error = exec_read_from(l, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>).
	 */
	pos = ELFDEFNNAME(NO_ADDR);
	if (epp->ep_esch->u.elf_probe_func) {
		vaddr_t startp = (vaddr_t)pos;

		error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
							  &startp);
		if (error)
			goto bad;
		pos = (Elf_Addr)startp;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		Elf_Addr  addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);

			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr &&
			    eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				}
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp) {
		struct elf_args *ap;
		int j = epp->ep_vmcmds.evs_used;
		u_long interp_offset;

		MALLOC(ap, struct elf_args *, sizeof(struct elf_args),
		    M_TEMP, M_WAITOK);
		if ((error = elf_load_file(l, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
			FREE(ap, M_TEMP);
			goto bad;
		}
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr;
		epp->ep_entry = ap->arg_interp + interp_offset;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;

		PNBUF_PUT(interp);
	} else
		epp->ep_entry = eh->e_entry;

#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif
	free(ph, M_TEMP);
	return (*epp->ep_esch->es_setup_stack)(l, epp);

bad:
	if (interp)
		PNBUF_PUT(interp);
	free(ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}

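/*
 * netbsd_elf_signature():
 *
 * Scan the program headers for a PT_NOTE segment carrying the "NetBSD"
 * ELF note; return 0 if one is found and ENOEXEC otherwise.
 */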
int
netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Phdr *ph;
	size_t phsize;
	int error;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);
	error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize);
	if (error)
		goto out;

	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Phdr *ephp = &ph[i];
		Elf_Nhdr *np;

		if (ephp->p_type != PT_NOTE ||
		    ephp->p_filesz > 1024 ||
		    ephp->p_filesz < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		np = (Elf_Nhdr *)malloc(ephp->p_filesz, M_TEMP, M_WAITOK);
		error = exec_read_from(l, epp->ep_vp, ephp->p_offset, np,
		    ephp->p_filesz);
		if (error)
			goto next;

		if (np->n_type != ELF_NOTE_TYPE_NETBSD_TAG ||
		    np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
		    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
		    memcmp((caddr_t)(np + 1), ELF_NOTE_NETBSD_NAME,
		    ELF_NOTE_NETBSD_NAMESZ))
			goto next;

		error = 0;
		free(np, M_TEMP);
		goto out;

	next:
		free(np, M_TEMP);
		continue;
	}

	error = ENOEXEC;
out:
	free(ph, M_TEMP);
	return error;
}

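/*
 * netbsd_elf_probe():
 *
 * Native NetBSD probe function: accept the binary only if it carries the
 * NetBSD ELF note.  On platforms with a non-relocatable interpreter the
 * preferred interpreter address is forced to ELF_LINK_ADDR.
 */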
int
netbsd_elf_probe(struct lwp *l, struct exec_package *epp,
    void *eh, char *itp, vaddr_t *pos)
{
	int error;

	if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
		return error;
#ifdef ELF_INTERP_NON_RELOCATABLE
	*pos = ELF_LINK_ADDR;
#endif
	return 0;
}
828