1 /* $NetBSD: exec_elf.c,v 1.86 2016/09/15 17:45:44 christos Exp $ */
2
3 /*-
4 * Copyright (c) 1994, 2000, 2005, 2015 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Christos Zoulas and Maxime Villard.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1996 Christopher G. Demetriou
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. The name of the author may not be used to endorse or promote products
45 * derived from this software without specific prior written permission
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 */
58
59 #include <sys/cdefs.h>
60 __KERNEL_RCSID(0, "$NetBSD: exec_elf.c,v 1.86 2016/09/15 17:45:44 christos Exp $");
61
62 #ifdef _KERNEL_OPT
63 #include "opt_pax.h"
64 #endif /* _KERNEL_OPT */
65
66 #include <sys/param.h>
67 #include <sys/proc.h>
68 #include <sys/kmem.h>
69 #include <sys/namei.h>
70 #include <sys/vnode.h>
71 #include <sys/exec.h>
72 #include <sys/exec_elf.h>
73 #include <sys/syscall.h>
74 #include <sys/signalvar.h>
75 #include <sys/mount.h>
76 #include <sys/stat.h>
77 #include <sys/kauth.h>
78 #include <sys/bitops.h>
79
80 #include <sys/cpu.h>
81 #include <machine/reg.h>
82
83 #include <compat/common/compat_util.h>
84
85 #include <sys/pax.h>
86 #include <uvm/uvm_param.h>
87
88 extern struct emul emul_netbsd;
89
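/*
 * The ELFNAME*() macros below expand to 32-bit or 64-bit symbol names
 * depending on ELFSIZE; e.g. ELFNAME(load_interp) becomes
 * elf32_load_interp or elf64_load_interp.  This lets the same source be
 * built once per ELF class (exec_elf32.c and exec_elf64.c set ELFSIZE
 * and include this file).
 */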
90 #define elf_check_header ELFNAME(check_header)
91 #define elf_copyargs ELFNAME(copyargs)
92 #define elf_load_interp ELFNAME(load_interp)
93 #define elf_load_psection ELFNAME(load_psection)
94 #define exec_elf_makecmds ELFNAME2(exec,makecmds)
95 #define netbsd_elf_signature ELFNAME2(netbsd,signature)
96 #define netbsd_elf_probe ELFNAME2(netbsd,probe)
97 #define coredump ELFNAMEEND(coredump)
98 #define elf_free_emul_arg ELFNAME(free_emul_arg)
99
100 static int
101 elf_load_interp(struct lwp *, struct exec_package *, char *,
102 struct exec_vmcmd_set *, u_long *, Elf_Addr *);
103 static void
104 elf_load_psection(struct exec_vmcmd_set *, struct vnode *, const Elf_Phdr *,
105 Elf_Addr *, u_long *, int);
106
107 int netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
108 int netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
109 vaddr_t *);
110
111 static void elf_free_emul_arg(void *);
112
113 #ifdef DEBUG_ELF
114 #define DPRINTF(a, ...) printf("%s: " a "\n", __func__, ##__VA_ARGS__)
115 #else
116 #define DPRINTF(a, ...)
117 #endif
118 /* round up and down to the given alignment boundary. */
119 #define ELF_ROUND(a, b) (((a) + (b) - 1) & ~((b) - 1))
120 #define ELF_TRUNC(a, b) ((a) & ~((b) - 1))
121
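/*
 * elf_placedynexec():
 *
 * Place a position-independent (ET_DYN) executable: take the PaX ASLR
 * offset for the largest PT_LOAD alignment, add the minimum mappable
 * address, and shift every program header virtual address and the
 * entry point by that amount.
 */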
122 static void
123 elf_placedynexec(struct exec_package *epp, Elf_Ehdr *eh, Elf_Phdr *ph)
124 {
125 Elf_Addr align, offset;
126 int i;
127
128 for (align = i = 0; i < eh->e_phnum; i++)
129 if (ph[i].p_type == PT_LOAD && ph[i].p_align > align)
130 align = ph[i].p_align;
131
132 offset = (Elf_Addr)pax_aslr_exec_offset(epp, align);
133 offset += epp->ep_vm_minaddr;
134
135 for (i = 0; i < eh->e_phnum; i++)
136 ph[i].p_vaddr += offset;
137 epp->ep_entryoffset = offset;
138 eh->e_entry += offset;
139 }
140
141 /*
142 * Copy arguments onto the stack in the normal way, but add some
143 * extra information in case of dynamic binding.
144 */
145 int
146 elf_copyargs(struct lwp *l, struct exec_package *pack,
147 struct ps_strings *arginfo, char **stackp, void *argp)
148 {
149 size_t len, vlen;
150 AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname;
151 struct elf_args *ap;
152 int error;
153
154 if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
155 return error;
156
157 a = ai;
158 execname = NULL;
159
160 memset(ai, 0, sizeof(ai));
161
162 /*
163 * Push extra arguments on the stack needed by dynamically
164 * linked binaries
165 */
166 if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
167 struct vattr *vap = pack->ep_vap;
168
169 a->a_type = AT_PHDR;
170 a->a_v = ap->arg_phaddr;
171 a++;
172
173 a->a_type = AT_PHENT;
174 a->a_v = ap->arg_phentsize;
175 a++;
176
177 a->a_type = AT_PHNUM;
178 a->a_v = ap->arg_phnum;
179 a++;
180
181 a->a_type = AT_PAGESZ;
182 a->a_v = PAGE_SIZE;
183 a++;
184
185 a->a_type = AT_BASE;
186 a->a_v = ap->arg_interp;
187 a++;
188
189 a->a_type = AT_FLAGS;
190 a->a_v = 0;
191 a++;
192
193 a->a_type = AT_ENTRY;
194 a->a_v = ap->arg_entry;
195 a++;
196
197 a->a_type = AT_EUID;
198 if (vap->va_mode & S_ISUID)
199 a->a_v = vap->va_uid;
200 else
201 a->a_v = kauth_cred_geteuid(l->l_cred);
202 a++;
203
204 a->a_type = AT_RUID;
205 a->a_v = kauth_cred_getuid(l->l_cred);
206 a++;
207
208 a->a_type = AT_EGID;
209 if (vap->va_mode & S_ISGID)
210 a->a_v = vap->va_gid;
211 else
212 a->a_v = kauth_cred_getegid(l->l_cred);
213 a++;
214
215 a->a_type = AT_RGID;
216 a->a_v = kauth_cred_getgid(l->l_cred);
217 a++;
218
219 a->a_type = AT_STACKBASE;
220 a->a_v = l->l_proc->p_stackbase;
221 a++;
222
223 if (pack->ep_path) {
224 execname = a;
225 a->a_type = AT_SUN_EXECNAME;
226 a++;
227 }
228
229 exec_free_emul_arg(pack);
230 }
231
232 a->a_type = AT_NULL;
233 a->a_v = 0;
234 a++;
235
236 vlen = (a - ai) * sizeof(ai[0]);
237
238 KASSERT(vlen <= sizeof(ai));
239
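/*
 * Informal sketch of what the copyout()s below leave at *stackp:
 *
 *	AuxInfo[]	the entries built above, terminated by AT_NULL
 *	execname	NUL-terminated pathname, only if ep_path was set
 *
 * *stackp is then advanced past both.
 */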
240 if (execname) {
241 char *path = pack->ep_path;
242 execname->a_v = (uintptr_t)(*stackp + vlen);
243 len = strlen(path) + 1;
244 if ((error = copyout(path, (*stackp + vlen), len)) != 0)
245 return error;
246 len = ALIGN(len);
247 } else
248 len = 0;
249
250 if ((error = copyout(ai, *stackp, vlen)) != 0)
251 return error;
252 *stackp += vlen + len;
253
254 return 0;
255 }
256
257 /*
258 * elf_check_header():
259 *
260 * Check header for validity; return 0 if ok, ENOEXEC if error
261 */
262 int
263 elf_check_header(Elf_Ehdr *eh)
264 {
265
266 if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
267 eh->e_ident[EI_CLASS] != ELFCLASS) {
268 DPRINTF("bad magic %#x%x%x", eh->e_ident[0], eh->e_ident[1],
269 eh->e_ident[2]);
270 return ENOEXEC;
271 }
272
273 switch (eh->e_machine) {
274
275 ELFDEFNNAME(MACHDEP_ID_CASES)
276
277 default:
278 DPRINTF("bad machine %#x", eh->e_machine);
279 return ENOEXEC;
280 }
281
282 if (ELF_EHDR_FLAGS_OK(eh) == 0) {
283 DPRINTF("bad flags %#x", eh->e_flags);
284 return ENOEXEC;
285 }
286
287 if (eh->e_shnum > ELF_MAXSHNUM || eh->e_phnum > ELF_MAXPHNUM) {
288 DPRINTF("bad shnum/phnum %#x/%#x", eh->e_shnum, eh->e_phnum);
289 return ENOEXEC;
290 }
291
292 return 0;
293 }
294
295 /*
296 * elf_load_psection():
297 *
298 * Load a psection at the appropriate address
299 */
300 static void
301 elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
302 const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int flags)
303 {
304 u_long msize, psize, rm, rf;
305 long diff, offset;
306 int vmprot = 0;
307
308 /*
309 * Default to the psection's linked address if the caller did not specify one.
310 */
311 if (*addr == ELFDEFNNAME(NO_ADDR))
312 *addr = ph->p_vaddr;
313
314 if (ph->p_align > 1) {
315 /*
316 * Make sure we are virtually aligned as we are supposed to be.
317 */
318 diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
319 KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
320 /*
321 * But make sure to not map any pages before the start of the
322 * psection by limiting the difference to within a page.
323 */
324 diff &= PAGE_MASK;
325 } else
326 diff = 0;
327
328 vmprot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
329 vmprot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
330 vmprot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;
331
332 /*
333 * Adjust everything so it all starts on a page boundary.
334 */
335 *addr -= diff;
336 offset = ph->p_offset - diff;
337 *size = ph->p_filesz + diff;
338 msize = ph->p_memsz + diff;
339
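/*
 * psize below is the part of the segment mapped directly from the file;
 * any remaining file-backed tail is mapped readvn, and bss space beyond
 * *size (up to msize) is zero-filled by the trailing vmcmd_map_zero.
 */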
340 if (ph->p_align >= PAGE_SIZE) {
341 if ((ph->p_flags & PF_W) != 0) {
342 /*
343 * Because the pagedvn pager can't handle zero fill
344 * of the last data page if it's not page aligned, we
345 * map the last page readvn.
346 */
347 psize = trunc_page(*size);
348 } else {
349 psize = round_page(*size);
350 }
351 } else {
352 psize = *size;
353 }
354
355 if (psize > 0) {
356 NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
357 vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
358 offset, vmprot, flags);
359 flags &= VMCMD_RELATIVE;
360 }
361 if (psize < *size) {
362 NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
363 *addr + psize, vp, offset + psize, vmprot, flags);
364 }
365
366 /*
367 * Check if we need to extend the size of the segment (does
368 * bss extend past the next page boundary)?
369 */
370 rm = round_page(*addr + msize);
371 rf = round_page(*addr + *size);
372
373 if (rm != rf) {
374 NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
375 0, vmprot, flags & VMCMD_RELATIVE);
376 *size = msize;
377 }
378 }
379
380 /*
381 * elf_load_interp():
382 *
383 * Load an interpreter pointed to by path.
384 */
385 static int
386 elf_load_interp(struct lwp *l, struct exec_package *epp, char *path,
387 struct exec_vmcmd_set *vcset, u_long *entryoff, Elf_Addr *last)
388 {
389 int error, i;
390 struct vnode *vp;
391 struct vattr attr;
392 Elf_Ehdr eh;
393 Elf_Phdr *ph = NULL;
394 const Elf_Phdr *base_ph;
395 const Elf_Phdr *last_ph;
396 u_long phsize;
397 Elf_Addr addr = *last;
398 struct proc *p;
399 bool use_topdown;
400
401 p = l->l_proc;
402
403 KASSERT(p->p_vmspace);
404 KASSERT(p->p_vmspace != proc0.p_vmspace);
405
406 #ifdef __USE_TOPDOWN_VM
407 use_topdown = epp->ep_flags & EXEC_TOPDOWN_VM;
408 #else
409 use_topdown = false;
410 #endif
411
412 /*
413 * 1. open file
414 * 2. read filehdr
415 * 3. map text, data, and bss out of it using VM_*
416 */
417 vp = epp->ep_interp;
418 if (vp == NULL) {
419 error = emul_find_interp(l, epp, path);
420 if (error != 0)
421 return error;
422 vp = epp->ep_interp;
423 }
424 /* We'll tidy this ourselves - otherwise we have locking issues */
425 epp->ep_interp = NULL;
426 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
427
428 /*
429 * Similarly, if it's not marked as executable, or it's not a regular
430 * file, we don't allow it to be used.
431 */
432 if (vp->v_type != VREG) {
433 error = EACCES;
434 goto badunlock;
435 }
436 if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
437 goto badunlock;
438
439 /* get attributes */
440 if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0)
441 goto badunlock;
442
443 /*
444 * Check mount point. Though we're not trying to exec this binary,
445 * we will be executing code from it, so if the mount point
446 * disallows execution or set-id-ness, we punt or kill the set-id.
447 */
448 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
449 error = EACCES;
450 goto badunlock;
451 }
452 if (vp->v_mount->mnt_flag & MNT_NOSUID)
453 epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);
454
455 error = vn_marktext(vp);
456 if (error)
457 goto badunlock;
458
459 VOP_UNLOCK(vp);
460
461 if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
462 goto bad;
463
464 if ((error = elf_check_header(&eh)) != 0)
465 goto bad;
466 if (eh.e_type != ET_DYN || eh.e_phnum == 0) {
467 DPRINTF("bad interpreter type %#x", eh.e_type);
468 error = ENOEXEC;
469 goto bad;
470 }
471
472 phsize = eh.e_phnum * sizeof(Elf_Phdr);
473 ph = kmem_alloc(phsize, KM_SLEEP);
474
475 if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
476 goto bad;
477
478 #ifdef ELF_INTERP_NON_RELOCATABLE
479 /*
480 * Evil hack: Only MIPS should be non-relocatable, and the
481 * psections should have a high address (typically 0x5ffe0000).
482 * If it's now relocatable, it should be linked at 0 and the
483 * psections should have zeros in the upper part of the address.
484 * Otherwise, force the load at the linked address.
485 */
486 if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
487 *last = ELFDEFNNAME(NO_ADDR);
488 #endif
489
490 /*
491 * If no position to load the interpreter was set by a probe
492 * function, pick the same address that a non-fixed mmap(0, ..)
493 * would (i.e. something safely out of the way).
494 */
495 if (*last == ELFDEFNNAME(NO_ADDR)) {
496 u_long limit = 0;
497 /*
498 * Find the start and ending addresses of the psections to
499 * be loaded. This will give us the size.
500 */
501 for (i = 0, base_ph = NULL; i < eh.e_phnum; i++) {
502 if (ph[i].p_type == PT_LOAD) {
503 u_long psize = ph[i].p_vaddr + ph[i].p_memsz;
504 if (base_ph == NULL)
505 base_ph = &ph[i];
506 if (psize > limit)
507 limit = psize;
508 }
509 }
510
511 if (base_ph == NULL) {
512 DPRINTF("no interpreter loadable sections");
513 error = ENOEXEC;
514 goto bad;
515 }
516
517 /*
518 * Now compute the size and load address.
519 */
520 addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
521 epp->ep_daddr,
522 round_page(limit) - trunc_page(base_ph->p_vaddr),
523 use_topdown);
524 addr += (Elf_Addr)pax_aslr_rtld_offset(epp, base_ph->p_align,
525 use_topdown);
526 } else {
527 addr = *last; /* may be ELF_LINK_ADDR */
528 }
529
530 /*
531 * Load all the necessary sections
532 */
533 for (i = 0, base_ph = NULL, last_ph = NULL; i < eh.e_phnum; i++) {
534 switch (ph[i].p_type) {
535 case PT_LOAD: {
536 u_long size;
537 int flags;
538
539 if (base_ph == NULL) {
540 /*
541 * First encountered psection is always the
542 * base psection. Make sure it's aligned
543 * properly (align down for topdown and align
544 * upwards for not topdown).
545 */
546 base_ph = &ph[i];
547 flags = VMCMD_BASE;
548 if (addr == ELF_LINK_ADDR)
549 addr = ph[i].p_vaddr;
550 if (use_topdown)
551 addr = ELF_TRUNC(addr, ph[i].p_align);
552 else
553 addr = ELF_ROUND(addr, ph[i].p_align);
554 } else {
555 u_long limit = round_page(last_ph->p_vaddr
556 + last_ph->p_memsz);
557 u_long base = trunc_page(ph[i].p_vaddr);
558
559 /*
560 * If there is a gap in between the psections,
561 * map it as inaccessible so nothing else
562 * mmap'ed will be placed there.
563 */
564 if (limit != base) {
565 NEW_VMCMD2(vcset, vmcmd_map_zero,
566 base - limit,
567 limit - base_ph->p_vaddr, NULLVP,
568 0, VM_PROT_NONE, VMCMD_RELATIVE);
569 }
570
571 addr = ph[i].p_vaddr - base_ph->p_vaddr;
572 flags = VMCMD_RELATIVE;
573 }
574 last_ph = &ph[i];
575 elf_load_psection(vcset, vp, &ph[i], &addr,
576 &size, flags);
577 /*
578 * If entry is within this psection then this
579 * must contain the .text section. *entryoff is
580 * relative to the base psection.
581 */
582 if (eh.e_entry >= ph[i].p_vaddr &&
583 eh.e_entry < (ph[i].p_vaddr + size)) {
584 *entryoff = eh.e_entry - base_ph->p_vaddr;
585 }
586 addr += size;
587 break;
588 }
589
590 default:
591 break;
592 }
593 }
594
595 kmem_free(ph, phsize);
596 /*
597 * This value is ignored if TOPDOWN.
598 */
599 *last = addr;
600 vrele(vp);
601 return 0;
602
603 badunlock:
604 VOP_UNLOCK(vp);
605
606 bad:
607 if (ph != NULL)
608 kmem_free(ph, phsize);
609 vrele(vp);
610 return error;
611 }
612
613 /*
614 * exec_elf_makecmds(): Prepare an Elf binary's exec package
615 *
616 * First, set up the various offsets/lengths in the exec package.
617 *
618 * Then, mark the text image busy (so it can be demand paged) or error
619 * out if this is not possible. Finally, set up vmcmds for the
620 * text, data, bss, and stack segments.
621 */
622 int
623 exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
624 {
625 Elf_Ehdr *eh = epp->ep_hdr;
626 Elf_Phdr *ph, *pp;
627 Elf_Addr phdr = 0, computed_phdr = 0, pos = 0, end_text = 0;
628 int error, i;
629 char *interp = NULL;
630 u_long phsize;
631 struct elf_args *ap;
632 bool is_dyn = false;
633
634 if (epp->ep_hdrvalid < sizeof(Elf_Ehdr)) {
635 DPRINTF("small header %#x", epp->ep_hdrvalid);
636 return ENOEXEC;
637 }
638 if ((error = elf_check_header(eh)) != 0)
639 return error;
640
641 if (eh->e_type == ET_DYN)
642 /* PIE, and some libs have an entry point */
643 is_dyn = true;
644 else if (eh->e_type != ET_EXEC) {
645 DPRINTF("bad type %#x", eh->e_type);
646 return ENOEXEC;
647 }
648
649 if (eh->e_phnum == 0) {
650 DPRINTF("no program headers");
651 return ENOEXEC;
652 }
653
654 error = vn_marktext(epp->ep_vp);
655 if (error)
656 return error;
657
658 /*
659 * Allocate space to hold all the program headers, and read them
660 * from the file
661 */
662 phsize = eh->e_phnum * sizeof(Elf_Phdr);
663 ph = kmem_alloc(phsize, KM_SLEEP);
664
665 if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
666 0)
667 goto bad;
668
669 epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
670 epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);
671
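/*
 * First pass over the program headers: pick up the PT_INTERP entry
 * (the requested dynamic linker), if any, and sanity-check its length.
 */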
672 for (i = 0; i < eh->e_phnum; i++) {
673 pp = &ph[i];
674 if (pp->p_type == PT_INTERP) {
675 if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN) {
676 DPRINTF("bad interpreter namelen %#jx",
677 (uintmax_t)pp->p_filesz);
678 error = ENOEXEC;
679 goto bad;
680 }
681 interp = PNBUF_GET();
682 if ((error = exec_read_from(l, epp->ep_vp,
683 pp->p_offset, interp, pp->p_filesz)) != 0)
684 goto bad;
685 /* Ensure interp is NUL-terminated and of the expected length */
686 if (strnlen(interp, pp->p_filesz) != pp->p_filesz - 1) {
687 DPRINTF("bad interpreter name");
688 error = ENOEXEC;
689 goto bad;
690 }
691 break;
692 }
693 }
694
695 /*
696 * On the same architecture, we may be emulating different systems.
697 * See which one will accept this executable.
698 *
699 * Probe functions would normally see if the interpreter (if any)
700 * exists. Emulation packages may possibly replace the interpreter in
701 * interp with a changed path (/emul/xxx/<path>).
702 */
703 pos = ELFDEFNNAME(NO_ADDR);
704 if (epp->ep_esch->u.elf_probe_func) {
705 vaddr_t startp = (vaddr_t)pos;
706
707 error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
708 &startp);
709 if (error)
710 goto bad;
711 pos = (Elf_Addr)startp;
712 }
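/*
 * pos is now either ELFDEFNNAME(NO_ADDR) or an interpreter placement
 * hint from the probe function; elf_load_interp() consumes and updates
 * it below.
 */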
713
714 if (is_dyn)
715 elf_placedynexec(epp, eh, ph);
716
717 /*
718 * Load all the necessary sections
719 */
720 for (i = 0; i < eh->e_phnum; i++) {
721 Elf_Addr addr = ELFDEFNNAME(NO_ADDR);
722 u_long size = 0;
723
724 switch (ph[i].p_type) {
725 case PT_LOAD:
726 elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
727 &ph[i], &addr, &size, VMCMD_FIXED);
728
729 /*
730 * Consider this as text segment, if it is executable.
731 * If there is more than one text segment, pick the
732 * largest.
733 */
734 if (ph[i].p_flags & PF_X) {
735 if (epp->ep_taddr == ELFDEFNNAME(NO_ADDR) ||
736 size > epp->ep_tsize) {
737 epp->ep_taddr = addr;
738 epp->ep_tsize = size;
739 }
740 end_text = addr + size;
741 } else {
742 epp->ep_daddr = addr;
743 epp->ep_dsize = size;
744 }
745 if (ph[i].p_offset == 0) {
746 computed_phdr = ph[i].p_vaddr + eh->e_phoff;
747 }
748 break;
749
750 case PT_SHLIB:
751 /* SCO has these sections. */
752 case PT_INTERP:
753 /* Already did this one. */
754 case PT_DYNAMIC:
755 case PT_NOTE:
756 break;
757 case PT_PHDR:
758 /* Note address of program headers (in text segment) */
759 phdr = ph[i].p_vaddr;
760 break;
761
762 default:
763 /*
764 * Not fatal; we don't need to understand everything.
765 */
766 break;
767 }
768 }
769
770 if (epp->ep_vmcmds.evs_used == 0) {
771 /* No VMCMD; there was no PT_LOAD section, or those
772 * sections were empty */
773 DPRINTF("no vmcommands");
774 error = ENOEXEC;
775 goto bad;
776 }
777
778 if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
779 epp->ep_daddr = round_page(end_text);
780 epp->ep_dsize = 0;
781 }
782
783 /*
784 * Check if we found a dynamically linked binary and arrange to load
785 * its interpreter
786 */
787 if (interp) {
788 u_int nused = epp->ep_vmcmds.evs_used;
789 u_long interp_offset = 0;
790
791 if ((error = elf_load_interp(l, epp, interp,
792 &epp->ep_vmcmds, &interp_offset, &pos)) != 0) {
793 goto bad;
794 }
795 if (epp->ep_vmcmds.evs_used == nused) {
796 /* elf_load_interp() has not set up any new VMCMD */
797 DPRINTF("no vmcommands for interpreter");
798 error = ENOEXEC;
799 goto bad;
800 }
801
802 ap = kmem_alloc(sizeof(*ap), KM_SLEEP);
803 ap->arg_interp = epp->ep_vmcmds.evs_cmds[nused].ev_addr;
804 epp->ep_entryoffset = interp_offset;
805 epp->ep_entry = ap->arg_interp + interp_offset;
806 PNBUF_PUT(interp);
807 interp = NULL;
808 } else {
809 epp->ep_entry = eh->e_entry;
810 if (epp->ep_flags & EXEC_FORCEAUX) {
811 ap = kmem_alloc(sizeof(*ap), KM_SLEEP);
812 ap->arg_interp = (vaddr_t)NULL;
813 } else
814 ap = NULL;
815 }
816
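/*
 * If we have a struct elf_args (dynamic executable, or EXEC_FORCEAUX
 * set by the probe), record the values elf_copyargs() will later
 * export through the ELF aux vector (AT_PHDR, AT_PHENT, ...).
 */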
817 if (ap) {
818 ap->arg_phaddr = phdr ? phdr : computed_phdr;
819 ap->arg_phentsize = eh->e_phentsize;
820 ap->arg_phnum = eh->e_phnum;
821 ap->arg_entry = eh->e_entry;
822 epp->ep_emul_arg = ap;
823 epp->ep_emul_arg_free = elf_free_emul_arg;
824 }
825
826 #ifdef ELF_MAP_PAGE_ZERO
827 /* Dell SVR4 maps page zero, yeuch! */
828 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
829 epp->ep_vp, 0, VM_PROT_READ);
830 #endif
831
832 error = (*epp->ep_esch->es_setup_stack)(l, epp);
833 if (error)
834 goto bad;
835
836 kmem_free(ph, phsize);
837 return 0;
838
839 bad:
840 if (interp)
841 PNBUF_PUT(interp);
842 exec_free_emul_arg(epp);
843 kmem_free(ph, phsize);
844 kill_vmcmds(&epp->ep_vmcmds);
845 return error;
846 }
847
848 int
849 netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
850 Elf_Ehdr *eh)
851 {
852 size_t i;
853 Elf_Shdr *sh;
854 Elf_Nhdr *np;
855 size_t shsize, nsize;
856 int error;
857 int isnetbsd = 0;
858 char *ndata, *ndesc;
859
860 #ifdef DIAGNOSTIC
861 const char *badnote;
862 #define BADNOTE(n) badnote = (n)
863 #else
864 #define BADNOTE(n)
865 #endif
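/*
 * BADNOTE() records a reason string so the DIAGNOSTIC printf at the
 * bottom of the note loop can say why a note was rejected; it compiles
 * to nothing otherwise.
 */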
866
867 epp->ep_pax_flags = 0;
868 if (eh->e_shnum > ELF_MAXSHNUM || eh->e_shnum == 0) {
869 DPRINTF("no signature %#x", eh->e_shnum);
870 return ENOEXEC;
871 }
872
873 shsize = eh->e_shnum * sizeof(Elf_Shdr);
874 sh = kmem_alloc(shsize, KM_SLEEP);
875 error = exec_read_from(l, epp->ep_vp, eh->e_shoff, sh, shsize);
876 if (error)
877 goto out;
878
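/*
 * Walk the section headers looking for SHT_NOTE sections and parse
 * every note we recognize; only a valid NetBSD tag note makes the
 * binary acceptable as native.
 */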
879 np = kmem_alloc(ELF_MAXNOTESIZE, KM_SLEEP);
880 for (i = 0; i < eh->e_shnum; i++) {
881 Elf_Shdr *shp = &sh[i];
882
883 if (shp->sh_type != SHT_NOTE ||
884 shp->sh_size > ELF_MAXNOTESIZE ||
885 shp->sh_size < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
886 continue;
887
888 error = exec_read_from(l, epp->ep_vp, shp->sh_offset, np,
889 shp->sh_size);
890 if (error)
891 continue;
892
893 /* Point to the note, skip the header */
894 ndata = (char *)(np + 1);
895
896 /*
897 * Padding is present if necessary to ensure 4-byte alignment.
898 * The actual section size is therefore:
899 * header size + 4-byte aligned name + 4-byte aligned desc
900 * Ensure this size is consistent with what is indicated
901 * in sh_size. The first check avoids integer overflows.
902 *
903 * Binaries from before NetBSD 1.6 have two notes in the same
904 * note section. The second note was never used, so as long as
905 * the section is at least as big as it should be, it's ok.
906 * These binaries also have a second note section with a note of
907 * type ELF_NOTE_TYPE_NETBSD_TAG, which can be ignored as well.
908 */
909 if (np->n_namesz > shp->sh_size || np->n_descsz > shp->sh_size) {
910 BADNOTE("note size limit");
911 goto bad;
912 }
913 nsize = sizeof(*np) + roundup(np->n_namesz, 4) +
914 roundup(np->n_descsz, 4);
915 if (nsize > shp->sh_size) {
916 BADNOTE("note size");
917 goto bad;
918 }
919 ndesc = ndata + roundup(np->n_namesz, 4);
920
921 switch (np->n_type) {
922 case ELF_NOTE_TYPE_NETBSD_TAG:
923 /* It is us */
924 if (np->n_namesz == ELF_NOTE_NETBSD_NAMESZ &&
925 np->n_descsz == ELF_NOTE_NETBSD_DESCSZ &&
926 memcmp(ndata, ELF_NOTE_NETBSD_NAME,
927 ELF_NOTE_NETBSD_NAMESZ) == 0) {
928 memcpy(&epp->ep_osversion, ndesc,
929 ELF_NOTE_NETBSD_DESCSZ);
930 isnetbsd = 1;
931 break;
932 }
933
934 /*
935 * Ignore SuSE tags; SuSE's n_type is the same as the
936 * NetBSD one.
937 */
938 if (np->n_namesz == ELF_NOTE_SUSE_NAMESZ &&
939 memcmp(ndata, ELF_NOTE_SUSE_NAME,
940 ELF_NOTE_SUSE_NAMESZ) == 0)
941 break;
942 /*
943 * Ignore old GCC
944 */
945 if (np->n_namesz == ELF_NOTE_OGCC_NAMESZ &&
946 memcmp(ndata, ELF_NOTE_OGCC_NAME,
947 ELF_NOTE_OGCC_NAMESZ) == 0)
948 break;
949 BADNOTE("NetBSD tag");
950 goto bad;
951
952 case ELF_NOTE_TYPE_PAX_TAG:
953 if (np->n_namesz == ELF_NOTE_PAX_NAMESZ &&
954 np->n_descsz == ELF_NOTE_PAX_DESCSZ &&
955 memcmp(ndata, ELF_NOTE_PAX_NAME,
956 ELF_NOTE_PAX_NAMESZ) == 0) {
957 uint32_t flags;
958 memcpy(&flags, ndesc, sizeof(flags));
959 /* Convert the flags and insert them into
960 * the exec package. */
961 pax_setup_elf_flags(epp, flags);
962 break;
963 }
964 BADNOTE("PaX tag");
965 goto bad;
966
967 case ELF_NOTE_TYPE_MARCH_TAG:
968 /* Copy the machine arch into the package. */
969 if (np->n_namesz == ELF_NOTE_MARCH_NAMESZ
970 && memcmp(ndata, ELF_NOTE_MARCH_NAME,
971 ELF_NOTE_MARCH_NAMESZ) == 0) {
972 /* Do not truncate the buffer */
973 if (np->n_descsz > sizeof(epp->ep_machine_arch)) {
974 BADNOTE("description size limit");
975 goto bad;
976 }
977 /*
978 * Ensure ndesc is NUL-terminated and of the
979 * expected length.
980 */
981 if (strnlen(ndesc, np->n_descsz) + 1 !=
982 np->n_descsz) {
983 BADNOTE("description size");
984 goto bad;
985 }
986 strlcpy(epp->ep_machine_arch, ndesc,
987 sizeof(epp->ep_machine_arch));
988 break;
989 }
990 BADNOTE("march tag");
991 goto bad;
992
993 case ELF_NOTE_TYPE_MCMODEL_TAG:
994 /* arch specific check for code model */
995 #ifdef ELF_MD_MCMODEL_CHECK
996 if (np->n_namesz == ELF_NOTE_MCMODEL_NAMESZ
997 && memcmp(ndata, ELF_NOTE_MCMODEL_NAME,
998 ELF_NOTE_MCMODEL_NAMESZ) == 0) {
999 ELF_MD_MCMODEL_CHECK(epp, ndesc, np->n_descsz);
1000 break;
1001 }
1002 BADNOTE("mcmodel tag");
1003 goto bad;
1004 #endif
1005 break;
1006
1007 case ELF_NOTE_TYPE_SUSE_VERSION_TAG:
1008 break;
1009
1010 case ELF_NOTE_TYPE_GO_BUILDID_TAG:
1011 break;
1012
1013 default:
1014 BADNOTE("unknown tag");
1015 bad:
1016 #ifdef DIAGNOSTIC
1017 /* Ignore GNU tags */
1018 if (np->n_namesz == ELF_NOTE_GNU_NAMESZ &&
1019 memcmp(ndata, ELF_NOTE_GNU_NAME,
1020 ELF_NOTE_GNU_NAMESZ) == 0)
1021 break;
1022
1023 int ns = MIN(np->n_namesz, shp->sh_size - sizeof(*np));
1024 printf("%s: Unknown elf note type %d (%s): "
1025 "[namesz=%d, descsz=%d name=%-*.*s]\n",
1026 epp->ep_kname, np->n_type, badnote, np->n_namesz,
1027 np->n_descsz, ns, ns, ndata);
1028 #endif
1029 break;
1030 }
1031 }
1032 kmem_free(np, ELF_MAXNOTESIZE);
1033
1034 error = isnetbsd ? 0 : ENOEXEC;
1035 #ifdef DEBUG_ELF
1036 if (error)
1037 DPRINTF("not netbsd");
1038 #endif
1039 out:
1040 kmem_free(sh, shsize);
1041 return error;
1042 }
1043
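/*
 * netbsd_elf_probe():
 *
 * Native probe function: require the NetBSD note signature, let the
 * machine-dependent probe (if any) adjust or reject, and force the
 * aux vector so even static native binaries get one.
 */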
1044 int
1045 netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp,
1046 vaddr_t *pos)
1047 {
1048 int error;
1049
1050 if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
1051 return error;
1052 #ifdef ELF_MD_PROBE_FUNC
1053 if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0)
1054 return error;
1055 #elif defined(ELF_INTERP_NON_RELOCATABLE)
1056 *pos = ELF_LINK_ADDR;
1057 #endif
1058 epp->ep_flags |= EXEC_FORCEAUX;
1059 return 0;
1060 }
1061
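/*
 * elf_free_emul_arg():
 *
 * Installed as epp->ep_emul_arg_free by exec_elf_makecmds(); releases
 * the struct elf_args attached to the exec package.
 */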
1062 void
1063 elf_free_emul_arg(void *arg)
1064 {
1065 struct elf_args *ap = arg;
1066 KASSERT(ap != NULL);
1067 kmem_free(ap, sizeof(*ap));
1068 }
1069