/*	$NetBSD: exec_elf.c,v 1.9.82.3 2010/08/11 22:54:37 yamt Exp $	*/

/*-
 * Copyright (c) 1994, 2000, 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: exec_elf.c,v 1.9.82.3 2010/08/11 22:54:37 yamt Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>
#include <sys/bitops.h>

#include <sys/cpu.h>
#include <machine/reg.h>

#include <compat/common/compat_util.h>

#include <sys/pax.h>

extern struct emul emul_netbsd;

#define elf_check_header	ELFNAME(check_header)
#define elf_copyargs		ELFNAME(copyargs)
#define elf_load_file		ELFNAME(load_file)
#define elf_load_psection	ELFNAME(load_psection)
#define exec_elf_makecmds	ELFNAME2(exec,makecmds)
#define netbsd_elf_signature	ELFNAME2(netbsd,signature)
#define netbsd_elf_probe	ELFNAME2(netbsd,probe)
#define coredump		ELFNAMEEND(coredump)

int	elf_load_file(struct lwp *, struct exec_package *, char *,
	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);

int	netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
int	netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
	    vaddr_t *);

/* round up and down to page boundaries. */
#define ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define ELF_TRUNC(a, b)		((a) & ~((b) - 1))
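/*
 * For example, with an alignment of 0x1000 (b must be a power of two):
 *	ELF_ROUND(0x12345, 0x1000) == 0x13000
 *	ELF_TRUNC(0x12345, 0x1000) == 0x12000
 */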

/*
 * Arbitrary limits to avoid DoS from excessive memory allocation.
 */
#define MAXPHNUM	128
#define MAXSHNUM	32768
#define MAXNOTESIZE	1024

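/*
 * elf_placedynexec():
 *
 * Pick a load offset for a dynamic (ET_DYN) executable and shift every
 * program header virtual address, plus the entry point, by it.  The
 * offset is the largest PT_LOAD alignment (at least PAGE_SIZE), or, when
 * PAX_ASLR is active, a randomized delta truncated to that alignment
 * plus one page.
 */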
static void
elf_placedynexec(struct lwp *l, struct exec_package *epp, Elf_Ehdr *eh,
    Elf_Phdr *ph)
{
	Elf_Addr align, offset;
	int i;

	for (align = i = 0; i < eh->e_phnum; i++)
		if (ph[i].p_type == PT_LOAD && ph[i].p_align > align)
			align = ph[i].p_align;

#ifdef PAX_ASLR
	if (pax_aslr_active(l)) {
		size_t pax_align, l2, delta;
		uint32_t r;

		pax_align = align;

		r = arc4random();

		if (pax_align == 0)
			pax_align = PGSHIFT;
		l2 = ilog2(pax_align);
		delta = PAX_ASLR_DELTA(r, l2, PAX_ASLR_DELTA_EXEC_LEN);
		offset = ELF_TRUNC(delta, pax_align) + PAGE_SIZE;
#ifdef PAX_ASLR_DEBUG
		uprintf("r=0x%x l2=0x%zx PGSHIFT=0x%x Delta=0x%zx\n", r, l2,
		    PGSHIFT, delta);
		uprintf("pax offset=0x%llx entry=0x%llx\n",
		    (unsigned long long)offset,
		    (unsigned long long)eh->e_entry);
#endif /* PAX_ASLR_DEBUG */
	} else
#endif /* PAX_ASLR */
		offset = MAX(align, PAGE_SIZE);

	for (i = 0; i < eh->e_phnum; i++)
		ph[i].p_vaddr += offset;
	eh->e_entry += offset;
}

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
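/*
 * For dynamically linked binaries the following is written to the new
 * stack (after the strings copied by copyargs()); statically linked
 * binaries get only the terminating AT_NULL entry:
 *
 *	AuxInfo[]: AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_BASE,
 *		   AT_FLAGS, AT_ENTRY, AT_EUID, AT_RUID, AT_EGID,
 *		   AT_RGID, [AT_SUN_EXECNAME,] AT_NULL
 *	followed by the executable's pathname, if AT_SUN_EXECNAME is present.
 */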
int
elf_copyargs(struct lwp *l, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len, vlen;
	AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname;
	struct elf_args *ap;
	int error;

	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;
	execname = NULL;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = kauth_cred_geteuid(l->l_cred);
		a++;

		a->a_type = AT_RUID;
		a->a_v = kauth_cred_getuid(l->l_cred);
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = kauth_cred_getegid(l->l_cred);
		a++;

		a->a_type = AT_RGID;
		a->a_v = kauth_cred_getgid(l->l_cred);
		a++;

		if (pack->ep_path) {
			execname = a;
			a->a_type = AT_SUN_EXECNAME;
			a++;
		}

		free(ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	vlen = (a - ai) * sizeof(AuxInfo);

	if (execname) {
		char *path = pack->ep_path;
		execname->a_v = (uintptr_t)(*stackp + vlen);
		len = strlen(path) + 1;
		if ((error = copyout(path, (*stackp + vlen), len)) != 0)
			return error;
		len = ALIGN(len);
	} else
		len = 0;

	if ((error = copyout(ai, *stackp, vlen)) != 0)
		return error;
	*stackp += vlen + len;

	return 0;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC if error
 */
int
elf_check_header(Elf_Ehdr *eh, int type)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS)
		return ENOEXEC;

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		return ENOEXEC;
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0)
		return ENOEXEC;

	if (eh->e_type != type)
		return ENOEXEC;

	if (eh->e_shnum > MAXSHNUM || eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	return 0;
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
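/*
 * Mapping strategy used below: page-aligned file data is mapped with
 * vmcmd_map_pagedvn; a writable, non page-aligned tail (and any psection
 * with sub-page alignment) is read in with vmcmd_map_readvn; memory
 * beyond p_filesz (bss) is zero-filled with vmcmd_map_zero.
 */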
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned we
			 * map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, *prot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does
	 * bss extend past the next page boundary)?
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
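/*
 * On success the interpreter's vmcmds are appended to *vcset and
 * *entryoff is set to its entry point relative to the base psection,
 * so the caller can compute the final entry address.
 */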
int
elf_load_file(struct lwp *l, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    Elf_Addr *last)
{
	int error, i;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *ph0;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;
	struct proc *p;

	p = l->l_proc;

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	vp = epp->ep_interp;
	if (vp == NULL) {
		error = emul_find_interp(l, epp, path);
		if (error != 0)
			return error;
		vp = epp->ep_interp;
	}
	/* We'll tidy this ourselves - otherwise we have locking issues */
	epp->ep_interp = NULL;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Similarly, if it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

#ifdef notyet /* XXX cgd 960926 */
	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
#endif

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp);

	if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh, ET_DYN)) != 0)
		goto bad;

	if (eh.e_phnum > MAXPHNUM || eh.e_phnum == 0) {
		error = ENOEXEC;
		goto bad;
	}

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = kmem_alloc(phsize, KM_SLEEP);

	if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

#ifdef ELF_INTERP_NON_RELOCATABLE
	/*
	 * Evil hack:  Only MIPS should be non-relocatable, and the
	 * psections should have a high address (typically 0x5ffe0000).
	 * If it's now relocatable, it should be linked at 0 and the
	 * psections should have zeros in the upper part of the address.
	 * Otherwise, force the load at the linked address.
	 */
	if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
		*last = ELFDEFNNAME(NO_ADDR);
#endif

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
		     i++, ph0++) {
			if (ph0->p_type == PT_LOAD) {
				u_long psize = ph0->p_vaddr + ph0->p_memsz;
				if (base_ph == NULL)
					base_ph = ph0;
				if (psize > limit)
					limit = psize;
			}
		}

		if (base_ph == NULL) {
			error = ENOEXEC;
			goto bad;
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
		    epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr));
	} else
		addr = *last; /* may be ELF_LINK_ADDR */

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
	     i < eh.e_phnum; i++, ph0++) {
		switch (ph0->p_type) {
		case PT_LOAD: {
			u_long size;
			int prot = 0;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly (align down for topdown and align
				 * upwards for not topdown).
				 */
				base_ph = ph0;
				flags = VMCMD_BASE;
				if (addr == ELF_LINK_ADDR)
					addr = ph0->p_vaddr;
				if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
					addr = ELF_TRUNC(addr, ph0->p_align);
				else
					addr = ELF_ROUND(addr, ph0->p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph0->p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

				addr = ph0->p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = ph0;
			elf_load_psection(vcset, vp, &ph[i], &addr,
			    &size, &prot, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph0->p_vaddr &&
			    eh.e_entry < (ph0->p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		case PT_DYNAMIC:
		case PT_PHDR:
			break;

		case PT_NOTE:
			break;

		default:
			break;
		}
	}

	kmem_free(ph, phsize);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp);

bad:
	if (ph != NULL)
		kmem_free(ph, phsize);
#ifdef notyet /* XXX cgd 960926 */
	(maybe) VOP_CLOSE it
#endif
	vrele(vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
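/*
 * For ET_DYN executables the psection addresses are first shifted by
 * elf_placedynexec().  If a PT_INTERP header is present, the interpreter
 * is mapped via elf_load_file() and ep_entry points at it; the original
 * entry point is handed to the interpreter through the AT_ENTRY aux
 * entry filled in by elf_copyargs().
 */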
int
exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, pos = 0;
	int error, i, nload;
	char *interp = NULL;
	u_long phsize;
	struct proc *p;
	bool is_dyn;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return ENOEXEC;

	is_dyn = elf_check_header(eh, ET_DYN) == 0;
	/*
	 * XXX allow for executing shared objects.  It seems silly
	 * but other ELF-based systems allow it as well.
	 */
	if (elf_check_header(eh, ET_EXEC) != 0 && !is_dyn)
		return ENOEXEC;

	if (eh->e_phnum > MAXPHNUM || eh->e_phnum == 0)
		return ENOEXEC;

	error = vn_marktext(epp->ep_vp);
	if (error)
		return error;

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	p = l->l_proc;
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = kmem_alloc(phsize, KM_SLEEP);

	if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= MAXPATHLEN) {
				error = ENOEXEC;
				goto bad;
			}
			interp = PNBUF_GET();
			interp[0] = '\0';
			if ((error = exec_read_from(l, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>).
	 */
	pos = ELFDEFNNAME(NO_ADDR);
	if (epp->ep_esch->u.elf_probe_func) {
		vaddr_t startp = (vaddr_t)pos;

		error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
		    &startp);
		if (error)
			goto bad;
		pos = (Elf_Addr)startp;
	}

#if defined(PAX_MPROTECT) || defined(PAX_SEGVGUARD) || defined(PAX_ASLR)
	p->p_pax = epp->ep_pax_flags;
#endif /* PAX_MPROTECT || PAX_SEGVGUARD || PAX_ASLR */

	if (is_dyn)
		elf_placedynexec(l, epp, eh, ph);

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		Elf_Addr addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2) {
				error = ENOEXEC;
				goto bad;
			}
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);

			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr &&
			    eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				}
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
			break;
		case PT_NOTE:
			break;
		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp) {
		struct elf_args *ap;
		int j = epp->ep_vmcmds.evs_used;
		u_long interp_offset;

		ap = (struct elf_args *)malloc(sizeof(struct elf_args),
		    M_TEMP, M_WAITOK);
		if ((error = elf_load_file(l, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
			free(ap, M_TEMP);
			goto bad;
		}
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[j].ev_addr;
		epp->ep_entry = ap->arg_interp + interp_offset;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;

		PNBUF_PUT(interp);
	} else
		epp->ep_entry = eh->e_entry;

#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif
	kmem_free(ph, phsize);
	return (*epp->ep_esch->es_setup_stack)(l, epp);

bad:
	if (interp)
		PNBUF_PUT(interp);
	kmem_free(ph, phsize);
	kill_vmcmds(&epp->ep_vmcmds);
	return error;
}

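/*
 * netbsd_elf_signature():
 *
 * Scan the section headers for SHT_NOTE sections.  The binary is
 * accepted only if a NetBSD ident note is found; a PaX note, if present,
 * supplies epp->ep_pax_flags.
 */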
int
netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Shdr *sh;
	Elf_Nhdr *np;
	size_t shsize;
	int error;
	int isnetbsd = 0;
	char *ndata;

	epp->ep_pax_flags = 0;
	if (eh->e_shnum > MAXSHNUM || eh->e_shnum == 0)
		return ENOEXEC;

	shsize = eh->e_shnum * sizeof(Elf_Shdr);
	sh = kmem_alloc(shsize, KM_SLEEP);
	error = exec_read_from(l, epp->ep_vp, eh->e_shoff, sh, shsize);
	if (error)
		goto out;

	np = kmem_alloc(MAXNOTESIZE, KM_SLEEP);
	for (i = 0; i < eh->e_shnum; i++) {
		Elf_Shdr *shp = &sh[i];

		if (shp->sh_type != SHT_NOTE ||
		    shp->sh_size > MAXNOTESIZE ||
		    shp->sh_size < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		error = exec_read_from(l, epp->ep_vp, shp->sh_offset, np,
		    shp->sh_size);
		if (error)
			continue;

		ndata = (char *)(np + 1);
		switch (np->n_type) {
		case ELF_NOTE_TYPE_NETBSD_TAG:
			if (np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
			    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
			    memcmp(ndata, ELF_NOTE_NETBSD_NAME,
			    ELF_NOTE_NETBSD_NAMESZ))
				goto bad;
			isnetbsd = 1;
			break;

		case ELF_NOTE_TYPE_PAX_TAG:
			if (np->n_namesz != ELF_NOTE_PAX_NAMESZ ||
			    np->n_descsz != ELF_NOTE_PAX_DESCSZ ||
			    memcmp(ndata, ELF_NOTE_PAX_NAME,
			    ELF_NOTE_PAX_NAMESZ)) {
bad:
#ifdef DIAGNOSTIC
				printf("%s: bad tag %d: "
				    "[%d %d, %d %d, %*.*s %*.*s]\n",
				    epp->ep_kname,
				    np->n_type,
				    np->n_namesz, ELF_NOTE_PAX_NAMESZ,
				    np->n_descsz, ELF_NOTE_PAX_DESCSZ,
				    ELF_NOTE_PAX_NAMESZ,
				    ELF_NOTE_PAX_NAMESZ,
				    ndata,
				    ELF_NOTE_PAX_NAMESZ,
				    ELF_NOTE_PAX_NAMESZ,
				    ELF_NOTE_PAX_NAME);
#endif
				continue;
			}
			(void)memcpy(&epp->ep_pax_flags,
			    ndata + ELF_NOTE_PAX_NAMESZ,
			    sizeof(epp->ep_pax_flags));
			break;

		default:
#ifdef DIAGNOSTIC
			printf("%s: unknown note type %d\n", epp->ep_kname,
			    np->n_type);
#endif
			break;
		}
	}
	kmem_free(np, MAXNOTESIZE);

	error = isnetbsd ? 0 : ENOEXEC;
out:
	kmem_free(sh, shsize);
	return error;
}

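/*
 * netbsd_elf_probe():
 *
 * Emulation probe hook: accept the binary only if it carries the NetBSD
 * note, then let machine-dependent code (ELF_MD_PROBE_FUNC) adjust the
 * interpreter load address, or pin it to ELF_LINK_ADDR on ports with a
 * non-relocatable interpreter.
 */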
int
netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp,
    vaddr_t *pos)
{
	int error;

	if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
		return error;
#ifdef ELF_MD_PROBE_FUNC
	if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0)
		return error;
#elif defined(ELF_INTERP_NON_RELOCATABLE)
	*pos = ELF_LINK_ADDR;
#endif
	return 0;
}