/*	$NetBSD: exec_elf32.c,v 1.113 2006/05/16 00:08:25 elad Exp $	*/

/*-
 * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_elf32.c,v 1.113 2006/05/16 00:08:25 elad Exp $");

/* If not included by exec_elf64.c, ELFSIZE won't be defined. */
#ifndef ELFSIZE
#define	ELFSIZE		32
#endif

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <machine/cpu.h>
#include <machine/reg.h>

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

extern const struct emul emul_netbsd;

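/*
 * The ELFNAME()/ELFNAME2() macros (from <sys/exec_elf.h>) expand these
 * names with an elf32_/elf64_ component chosen by ELFSIZE, so this one
 * source file can be compiled for both word sizes; exec_elf64.c simply
 * defines ELFSIZE as 64 and includes this file.
 */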
#define elf_check_header	ELFNAME(check_header)
#define elf_copyargs		ELFNAME(copyargs)
#define elf_load_file		ELFNAME(load_file)
#define elf_load_psection	ELFNAME(load_psection)
#define exec_elf_makecmds	ELFNAME2(exec,makecmds)
#define netbsd_elf_signature	ELFNAME2(netbsd,signature)
#define netbsd_elf_probe	ELFNAME2(netbsd,probe)

int	elf_load_file(struct lwp *, struct exec_package *, char *,
	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);

int	netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
int	netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
	    vaddr_t *);

/* round up and down to page boundaries. */
#define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))
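/*
 * For example, with b = 0x1000: ELF_ROUND(0x12345, 0x1000) == 0x13000
 * and ELF_TRUNC(0x12345, 0x1000) == 0x12000.  Both macros assume that
 * b is a power of two.
 */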
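/* Arbitrary upper bound on the number of program headers we accept. */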
#define MAXPHNUM	50

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
int
elf_copyargs(struct lwp *l, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;
	struct proc *p;
	int error;

	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;
	p = l->l_proc;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = kauth_cred_geteuid(p->p_cred);
		a++;

		a->a_type = AT_RUID;
		a->a_v = kauth_cred_getuid(p->p_cred);
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = kauth_cred_getegid(p->p_cred);
		a++;

		a->a_type = AT_RGID;
		a->a_v = kauth_cred_getgid(p->p_cred);
		a++;

		free(ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	len = (a - ai) * sizeof(AuxInfo);
	if ((error = copyout(ai, *stackp, len)) != 0)
		return error;
	*stackp += len;

	return 0;
}
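
/*
 * A rough sketch of what elf_copyargs() above leaves on the new process
 * stack (machine-dependent details aside): argc, the argv[] pointers
 * terminated by NULL, the envp[] pointers terminated by NULL, the
 * AuxInfo records ending with AT_NULL, and finally the argument and
 * environment strings themselves.
 */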

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC on error.
 */
int
elf_check_header(Elf_Ehdr *eh, int type)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS)
		return ENOEXEC;

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		return ENOEXEC;
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0)
		return ENOEXEC;

	if (eh->e_type != type)
		return ENOEXEC;

	if (eh->e_shnum > 32768 || eh->e_phnum > 128)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;
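	/*
	 * Example (assuming 4KB pages): a psection with p_align = 0x10000
	 * and p_vaddr = 0x10234 gives diff = 0x234, so the mapping set up
	 * below starts 0x234 bytes before p_vaddr, i.e. on a page
	 * boundary, with the file offset backed up to match.
	 */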

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned we
			 * map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, *prot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does the
	 * bss extend past the next page boundary)?
	 */
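	/*
	 * For example (4KB pages, *addr page aligned): with *size = 0x1800
	 * and msize = 0x3800, rf = *addr + 0x2000 and rm = *addr + 0x4000,
	 * so 0x2000 bytes of zero-fill are mapped at rf below.
	 */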
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
int
elf_load_file(struct lwp *l, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    Elf_Addr *last)
{
	int error, i;
	struct nameidata nd;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *ph0;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;
	struct proc *p;

	p = l->l_proc;

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, l);
	if ((error = namei(&nd)) != 0)
		return error;
	vp = nd.ni_vp;

	/*
	 * Similarly, if it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_proc->p_cred, l)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, l->l_proc->p_cred, l)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

#ifdef notyet /* XXX cgd 960926 */
	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
#endif

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp, 0);

	if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, ET_DYN)) != 0)
		goto bad;

	if (eh.e_phnum > MAXPHNUM)
		goto bad;

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

#ifdef ELF_INTERP_NON_RELOCATABLE
	/*
	 * Evil hack:  Only MIPS should be non-relocatable, and the
	 * psections should have a high address (typically 0x5ffe0000).
	 * If it's now relocatable, it should be linked at 0 and the
	 * psections should have zeros in the upper part of the address.
	 * Otherwise, force the load at the linked address.
	 */
	if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
		*last = ELFDEFNNAME(NO_ADDR);
#endif

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
		     i++, ph0++) {
			if (ph0->p_type == PT_LOAD) {
				u_long psize = ph0->p_vaddr + ph0->p_memsz;
				if (base_ph == NULL)
					base_ph = ph0;
				if (psize > limit)
					limit = psize;
			}
		}

		if (base_ph == NULL) {
			error = ENOEXEC;
			goto bad;
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
		    epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr));
	} else
		addr = *last; /* may be ELF_LINK_ADDR */

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
	     i < eh.e_phnum; i++, ph0++) {
		switch (ph0->p_type) {
		case PT_LOAD: {
			u_long size;
			int prot = 0;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly (align down for topdown and align
				 * upwards for not topdown).
				 */
				base_ph = ph0;
				flags = VMCMD_BASE;
				if (addr == ELF_LINK_ADDR)
					addr = ph0->p_vaddr;
				if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
					addr = ELF_TRUNC(addr, ph0->p_align);
				else
					addr = ELF_ROUND(addr, ph0->p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph0->p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

				addr = ph0->p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = ph0;
			elf_load_psection(vcset, vp, &ph[i], &addr,
			    &size, &prot, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph0->p_vaddr &&
			    eh.e_entry < (ph0->p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		case PT_DYNAMIC:
		case PT_PHDR:
			break;

		case PT_NOTE:
#ifdef PAX_MPROTECT
			pax_mprotect_adjust(l, ph[i].p_flags);
#endif /* PAX_MPROTECT */
			break;

		default:
			break;
		}
	}

	free(ph, M_TEMP);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp, 0);

bad:
	if (ph != NULL)
		free(ph, M_TEMP);
#ifdef notyet /* XXX cgd 960926 */
	(maybe) VOP_CLOSE it
#endif
	vrele(vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
int
exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, pos = 0;
	int error, i, nload;
	char *interp = NULL;
	u_long phsize;
	struct proc *p;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return ENOEXEC;

	/*
	 * XXX allow for executing shared objects. It seems silly
	 * but other ELF-based systems allow it as well.
	 */
	if (elf_check_header(eh, ET_EXEC) != 0 &&
	    elf_check_header(eh, ET_DYN) != 0)
		return ENOEXEC;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	error = vn_marktext(epp->ep_vp);
	if (error)
		return error;

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	p = l->l_proc;
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= MAXPATHLEN)
				goto bad;
			interp = PNBUF_GET();
			interp[0] = '\0';
			if ((error = exec_read_from(l, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>).
	 */
	pos = ELFDEFNNAME(NO_ADDR);
	if (epp->ep_esch->u.elf_probe_func) {
		vaddr_t startp = (vaddr_t)pos;

		error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
							  &startp);
		if (error)
			goto bad;
		pos = (Elf_Addr)startp;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		Elf_Addr  addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);

			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr &&
			    eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				}
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp) {
		struct elf_args *ap;
		int j = epp->ep_vmcmds.evs_used;
		u_long interp_offset;

		MALLOC(ap, struct elf_args *, sizeof(struct elf_args),
		    M_TEMP, M_WAITOK);
		if ((error = elf_load_file(l, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
			FREE(ap, M_TEMP);
			goto bad;
		}
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr;
		epp->ep_entry = ap->arg_interp + interp_offset;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;

		PNBUF_PUT(interp);
	} else
		epp->ep_entry = eh->e_entry;

#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif
	free(ph, M_TEMP);
	return (*epp->ep_esch->es_setup_stack)(l, epp);

bad:
	if (interp)
		PNBUF_PUT(interp);
	free(ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}

int
netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Phdr *ph;
	size_t phsize;
	int error;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);
	error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize);
	if (error)
		goto out;

	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Phdr *ephp = &ph[i];
		Elf_Nhdr *np;

		if (ephp->p_type != PT_NOTE ||
		    ephp->p_filesz > 1024 ||
		    ephp->p_filesz < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		np = (Elf_Nhdr *)malloc(ephp->p_filesz, M_TEMP, M_WAITOK);
		error = exec_read_from(l, epp->ep_vp, ephp->p_offset, np,
		    ephp->p_filesz);
		if (error)
			goto next;

		if (np->n_type != ELF_NOTE_TYPE_NETBSD_TAG ||
		    np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
		    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
		    memcmp((caddr_t)(np + 1), ELF_NOTE_NETBSD_NAME,
		    ELF_NOTE_NETBSD_NAMESZ))
			goto next;

		error = 0;
		free(np, M_TEMP);
		goto out;

	next:
		free(np, M_TEMP);
		continue;
	}

	error = ENOEXEC;
out:
	free(ph, M_TEMP);
	return error;
}

int
netbsd_elf_probe(struct lwp *l, struct exec_package *epp,
    void *eh, char *itp, vaddr_t *pos)
{
	int error;

	if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
		return error;
#ifdef ELF_INTERP_NON_RELOCATABLE
	*pos = ELF_LINK_ADDR;
#endif
	return 0;
8251.1Sfvdl}
826