/*	$NetBSD: exec_elf32.c,v 1.88 2003/02/28 19:44:42 matt Exp $	*/

/*-
 * Copyright (c) 1994, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_elf32.c,v 1.88 2003/02/28 19:44:42 matt Exp $");

/* If not included by exec_elf64.c, ELFSIZE won't be defined. */
#ifndef ELFSIZE
#define	ELFSIZE		32
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>

#include <machine/cpu.h>
#include <machine/reg.h>

extern const struct emul emul_netbsd;

int	ELFNAME(check_header)(Elf_Ehdr *, int);
int	ELFNAME(load_file)(struct proc *, struct exec_package *, char *,
	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
void	ELFNAME(load_psection)(struct exec_vmcmd_set *, struct vnode *,
	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);

int	ELFNAME2(netbsd,signature)(struct proc *, struct exec_package *,
	    Elf_Ehdr *);
int	ELFNAME2(netbsd,probe)(struct proc *, struct exec_package *,
	    void *, char *, vaddr_t *);

/* round up and down to page boundaries. */
#define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))

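/*
 * Both macros assume "b" is a power of two (a page size or a segment
 * alignment).  For example, with b == 0x1000:
 *
 *	ELF_ROUND(0x12345, 0x1000) == 0x13000	(round up)
 *	ELF_TRUNC(0x12345, 0x1000) == 0x12000	(round down)
 */
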
/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
int
ELFNAME(copyargs)(struct proc *p, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;
	int error;

	if ((error = copyargs(p, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = p->p_ucred->cr_uid;
		a++;

		a->a_type = AT_RUID;
		a->a_v = p->p_cred->p_ruid;
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = p->p_ucred->cr_gid;
		a++;

		a->a_type = AT_RGID;
		a->a_v = p->p_cred->p_rgid;
		a++;

		free(ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	len = (a - ai) * sizeof(AuxInfo);
	if ((error = copyout(ai, *stackp, len)) != 0)
		return error;
	*stackp += len;

	return 0;
}

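/*
 * The net effect of the above: after the argc/argv/envp vectors laid
 * down by copyargs(), a dynamically linked binary also gets the AuxInfo
 * records built here (AT_PHDR through AT_RGID); the vector is always
 * terminated by an AT_NULL entry, even for statically linked programs.
 * The run-time linker named in PT_INTERP uses these records to find the
 * program headers and the program's real entry point.
 */
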
/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC if error
 */
int
ELFNAME(check_header)(Elf_Ehdr *eh, int type)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS)
		return (ENOEXEC);

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		return (ENOEXEC);
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0)
		return (ENOEXEC);

	if (eh->e_type != type)
		return (ENOEXEC);

	if (eh->e_shnum > 512 ||
	    eh->e_phnum > 128)
		return (ENOEXEC);

	return (0);
}

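/*
 * The e_shnum/e_phnum limits above are sanity bounds only: callers
 * multiply e_phnum by sizeof(Elf_Phdr) and malloc() the result, so a
 * corrupt header with a huge count is rejected here instead of turning
 * into an oversized kernel allocation.
 */
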
/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
void
ELFNAME(load_psection)(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned we
			 * map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, *prot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does
	 * bss extend past the next page boundary)?
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

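/*
 * Example of the alignment handling above, assuming PAGE_SIZE is 0x1000:
 * for a psection with p_vaddr 0x804a100, p_offset 0x1100, p_filesz 0x500
 * and p_align 0x1000, diff is 0x100, so the mapping is slid back to start
 * at address 0x804a000 / file offset 0x1000 and *size grows to 0x600 to
 * compensate.
 */
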
/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
int
ELFNAME(load_file)(struct proc *p, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    Elf_Addr *last)
{
	int error, i;
	struct nameidata nd;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *ph0;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return error;
	vp = nd.ni_vp;

	/*
	 * Similarly, if it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, p->p_ucred, p)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

#ifdef notyet /* XXX cgd 960926 */
	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
#endif

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp, 0);

	if ((error = exec_read_from(p, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = ELFNAME(check_header)(&eh, ET_DYN)) != 0)
		goto bad;

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(p, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

	/* this breaks on, e.g., OpenBSD-compatible mips shared binaries. */
#ifndef ELF_INTERP_NON_RELOCATABLE
	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
		     i++, ph0++) {
			if (ph0->p_type == PT_LOAD) {
				u_long psize = ph0->p_vaddr + ph0->p_memsz;
				if (base_ph == NULL)
					base_ph = ph0;
				if (psize > limit)
					limit = psize;
			}
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = VM_DEFAULT_ADDRESS(epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr));
	} else
#endif	/* !ELF_INTERP_NON_RELOCATABLE */
		addr = *last;

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
	     i < eh.e_phnum; i++, ph0++) {
		switch (ph0->p_type) {
		case PT_LOAD: {
			u_long size;
			int prot = 0;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly.
				 */
				base_ph = ph0;
				flags = VMCMD_BASE;
				addr = ELF_TRUNC(addr + ph0->p_align - 1,
				    ph0->p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph0->p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

				addr = ph0->p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = ph0;
			ELFNAME(load_psection)(vcset, vp, &ph[i], &addr,
			    &size, &prot, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph0->p_vaddr &&
			    eh.e_entry < (ph0->p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		case PT_DYNAMIC:
		case PT_PHDR:
		case PT_NOTE:
			break;

		default:
			break;
		}
	}

	free(ph, M_TEMP);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp, 0);

bad:
	if (ph != NULL)
		free(ph, M_TEMP);
#ifdef notyet /* XXX cgd 960926 */
	(maybe) VOP_CLOSE it
#endif
	vrele(vp);
	return error;
}

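/*
 * Note on the vmcmds built above: the first PT_LOAD psection of the
 * interpreter is entered with VMCMD_BASE, and every later command,
 * including the VM_PROT_NONE mappings that plug the gaps between
 * psections, is VMCMD_RELATIVE with an address that is an offset from
 * that base psection, so the interpreter image stays contiguous
 * wherever exec finally places the base command.
 */
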
/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
int
ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, pos = 0;
	int error, i, nload;
	char *interp = NULL;
	u_long phsize;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return ENOEXEC;

	/*
	 * XXX allow for executing shared objects.  It seems silly
	 * but other ELF-based systems allow it as well.
	 */
	if (ELFNAME(check_header)(eh, ET_EXEC) != 0 &&
	    ELFNAME(check_header)(eh, ET_DYN) != 0)
		return ENOEXEC;

	error = vn_marktext(epp->ep_vp);
	if (error)
		return (error);

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(p, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	MALLOC(interp, char *, MAXPATHLEN, M_TEMP, M_WAITOK);
	interp[0] = '\0';

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= MAXPATHLEN)
				goto bad;
			if ((error = exec_read_from(p, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.  This currently only
	 * applies to SVR4, and IBCS2 on the i386 and Linux on the i386
	 * and the Alpha.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>).
	 */
	if (!epp->ep_esch->u.elf_probe_func) {
		pos = ELFDEFNNAME(NO_ADDR);
	} else {
		vaddr_t startp = 0;

		error = (*epp->ep_esch->u.elf_probe_func)(p, epp, eh, interp,
		    &startp);
		pos = (Elf_Addr)startp;
		if (error)
			goto bad;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		Elf_Addr addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			ELFNAME(load_psection)(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);

			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr &&
			    eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				}
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp[0]) {
		struct elf_args *ap;
		int i = epp->ep_vmcmds.evs_used;
		u_long interp_offset;

		MALLOC(ap, struct elf_args *, sizeof(struct elf_args),
		    M_TEMP, M_WAITOK);
		if ((error = ELFNAME(load_file)(p, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
			FREE(ap, M_TEMP);
			goto bad;
		}
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[i].ev_addr;
		epp->ep_entry = ap->arg_interp + interp_offset;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;
	} else
		epp->ep_entry = eh->e_entry;

#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif
	FREE(interp, M_TEMP);
	free(ph, M_TEMP);
	return exec_elf_setup_stack(p, epp);

bad:
	if (interp)
		FREE(interp, M_TEMP);
	free(ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}

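/*
 * For a dynamically linked binary the entry point stored in ep_entry
 * above lies inside the interpreter (arg_interp + interp_offset); the
 * program's own e_entry is kept in arg_entry and reaches the run-time
 * linker through the AT_ENTRY record written by the copyargs routine
 * at the top of this file.
 */
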
int
ELFNAME2(netbsd,signature)(struct proc *p, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Phdr *ph;
	size_t phsize;
	int error;

	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);
	error = exec_read_from(p, epp->ep_vp, eh->e_phoff, ph, phsize);
	if (error)
		goto out;

	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Phdr *ephp = &ph[i];
		Elf_Nhdr *np;

		if (ephp->p_type != PT_NOTE ||
		    ephp->p_filesz > 1024 ||
		    ephp->p_filesz < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		np = (Elf_Nhdr *)malloc(ephp->p_filesz, M_TEMP, M_WAITOK);
		error = exec_read_from(p, epp->ep_vp, ephp->p_offset, np,
		    ephp->p_filesz);
		if (error)
			goto next;

		if (np->n_type != ELF_NOTE_TYPE_NETBSD_TAG ||
		    np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
		    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
		    memcmp((caddr_t)(np + 1), ELF_NOTE_NETBSD_NAME,
		    ELF_NOTE_NETBSD_NAMESZ))
			goto next;

		error = 0;
		free(np, M_TEMP);
		goto out;

	next:
		free(np, M_TEMP);
		continue;
	}

	error = ENOEXEC;
out:
	free(ph, M_TEMP);
	return (error);
}

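/*
 * The signature check above accepts a binary only if one of its PT_NOTE
 * segments carries a note whose name, size and type match the NetBSD
 * identification note (the ELF_NOTE_NETBSD_* constants) that the native
 * toolchain puts into executables; everything else comes back as
 * ENOEXEC so that other emulations get a chance to claim the binary.
 */
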
int
ELFNAME2(netbsd,probe)(struct proc *p, struct exec_package *epp,
    void *eh, char *itp, vaddr_t *pos)
{
	int error;

	if ((error = ELFNAME2(netbsd,signature)(p, epp, eh)) != 0)
		return error;
	*pos = ELFDEFNNAME(NO_ADDR);
	return 0;
}