/*	$NetBSD: exec_elf32.c,v 1.93 2003/08/06 01:02:27 manu Exp $	*/

/*-
 * Copyright (c) 1994, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: exec_elf32.c,v 1.93 2003/08/06 01:02:27 manu Exp $");

/* If not included by exec_elf64.c, ELFSIZE won't be defined. */
#ifndef ELFSIZE
#define	ELFSIZE		32
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>

#include <machine/cpu.h>
#include <machine/reg.h>

extern const struct emul emul_netbsd;

int	ELFNAME(load_file)(struct proc *, struct exec_package *, char *,
	    struct exec_vmcmd_set *, u_long *, struct elf_args *, Elf_Addr *);
void	ELFNAME(load_psection)(struct exec_vmcmd_set *, struct vnode *,
	    const Elf_Phdr *, Elf_Addr *, u_long *, int *, int);

int ELFNAME2(netbsd,signature)(struct proc *, struct exec_package *,
    Elf_Ehdr *);
int ELFNAME2(netbsd,probe)(struct proc *, struct exec_package *,
    void *, char *, vaddr_t *);

/* round up and down to page boundaries. */
#define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))
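/*
 * For example, with a power-of-two b of 0x1000:
 * ELF_ROUND(0x1234, 0x1000) == 0x2000 and ELF_TRUNC(0x1234, 0x1000) == 0x1000.
 */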

#define MAXPHNUM	50

/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
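/*
 * For a dynamically linked binary the resulting auxiliary vector holds,
 * in order: AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_BASE, AT_FLAGS,
 * AT_ENTRY, AT_EUID, AT_RUID, AT_EGID, AT_RGID and a terminating
 * AT_NULL; statically linked binaries get only the AT_NULL entry.
 */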
int
ELFNAME(copyargs)(struct proc *p, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;
	struct elf_args *ap;
	int error;

	if ((error = copyargs(p, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = p->p_ucred->cr_uid;
		a++;

		a->a_type = AT_RUID;
		a->a_v = p->p_cred->p_ruid;
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = p->p_ucred->cr_gid;
		a++;

		a->a_type = AT_RGID;
		a->a_v = p->p_cred->p_rgid;
		a++;

		free(ap, M_TEMP);
		pack->ep_emul_arg = NULL;
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	len = (a - ai) * sizeof(AuxInfo);
	if ((error = copyout(ai, *stackp, len)) != 0)
		return error;
	*stackp += len;

	return 0;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC on error
 */
int
ELFNAME(check_header)(Elf_Ehdr *eh, int type)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS)
		return (ENOEXEC);

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		return (ENOEXEC);
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0)
		return (ENOEXEC);

	if (eh->e_type != type)
		return (ENOEXEC);

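	/*
	 * Reject headers with implausibly large section or program
	 * header counts as a sanity check.
	 */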
	if (eh->e_shnum > 512 ||
	    eh->e_phnum > 128)
		return (ENOEXEC);

	return (0);
}

/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
void
ELFNAME(load_psection)(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int *prot, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;

	/*
	 * If the caller specified a load address, use it; otherwise load
	 * at the address given in the program header.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;
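	/*
	 * For example (assuming 4KB pages): p_vaddr == 0x10234 with
	 * p_align == 0x10000 gives diff == 0x234, so the mapping is
	 * pulled back 0x234 bytes to the preceding page boundary.
	 */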

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned,
			 * we map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, *prot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does
	 * the bss extend past the next page boundary)?
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

/*
 * elf_load_file():
 *
 * Load a file (interpreter/library) pointed to by path
 * [stolen from coff_load_shlib()]. Made slightly generic
 * so it might be used externally.
 */
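/*
 * The first PT_LOAD psection is entered as a VMCMD_BASE vmcmd and later
 * psections as VMCMD_RELATIVE ones, so (roughly speaking) the whole
 * object can be placed as a unit wherever the base mapping ends up when
 * the vmcmds are eventually run.
 */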
int
ELFNAME(load_file)(struct proc *p, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, struct elf_args *ap,
    Elf_Addr *last)
{
	int error, i;
	struct nameidata nd;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *ph0;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0)
		return error;
	vp = nd.ni_vp;

	/*
	 * If it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, p->p_ucred, p)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

#ifdef notyet /* XXX cgd 960926 */
	XXX cgd 960926: (maybe) VOP_OPEN it (and VOP_CLOSE in copyargs?)
#endif

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp, 0);

	if ((error = exec_read_from(p, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = ELFNAME(check_header)(&eh, ET_DYN)) != 0)
		goto bad;

	if (eh.e_phnum > MAXPHNUM) {
		error = ENOEXEC;
		goto bad;
	}

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(p, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

	/* this breaks on, e.g., OpenBSD-compatible mips shared binaries. */
#ifndef ELF_INTERP_NON_RELOCATABLE
	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, ph0 = ph, base_ph = NULL; i < eh.e_phnum;
		     i++, ph0++) {
			if (ph0->p_type == PT_LOAD) {
				u_long psize = ph0->p_vaddr + ph0->p_memsz;
				if (base_ph == NULL)
					base_ph = ph0;
				if (psize > limit)
					limit = psize;
			}
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = VM_DEFAULT_ADDRESS(epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr));
	} else
#endif	/* !ELF_INTERP_NON_RELOCATABLE */
		addr = *last;

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, ph0 = ph, base_ph = NULL, last_ph = NULL;
	     i < eh.e_phnum; i++, ph0++) {
		switch (ph0->p_type) {
		case PT_LOAD: {
			u_long size;
			int prot = 0;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly (align down for topdown and align
				 * upwards for not topdown).
				 */
				base_ph = ph0;
				flags = VMCMD_BASE;
				if (p->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN)
					addr = ELF_TRUNC(addr, ph0->p_align);
				else
					addr = ELF_ROUND(addr, ph0->p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph0->p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

				addr = ph0->p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = ph0;
			ELFNAME(load_psection)(vcset, vp, &ph[i], &addr,
			    &size, &prot, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph0->p_vaddr &&
			    eh.e_entry < (ph0->p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		case PT_DYNAMIC:
		case PT_PHDR:
		case PT_NOTE:
			break;

		default:
			break;
		}
	}

	free(ph, M_TEMP);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp, 0);

bad:
	if (ph != NULL)
		free(ph, M_TEMP);
#ifdef notyet /* XXX cgd 960926 */
	(maybe) VOP_CLOSE it
#endif
	vrele(vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
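/*
 * At most two PT_LOAD psections are handled here: the one containing
 * e_entry is taken to be the text segment, the other one the data
 * segment.
 */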
int
ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, pos = 0;
	int error, i, nload;
	char *interp = NULL;
	u_long phsize;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return ENOEXEC;

	/*
	 * XXX allow for executing shared objects. It seems silly
	 * but other ELF-based systems allow it as well.
	 */
	if (ELFNAME(check_header)(eh, ET_EXEC) != 0 &&
	    ELFNAME(check_header)(eh, ET_DYN) != 0)
		return ENOEXEC;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	error = vn_marktext(epp->ep_vp);
	if (error)
		return (error);

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = exec_read_from(p, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	MALLOC(interp, char *, MAXPATHLEN, M_TEMP, M_WAITOK);
	interp[0] = '\0';

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz >= MAXPATHLEN)
				goto bad;
			if ((error = exec_read_from(p, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable. This currently only
	 * applies to SVR4 and IBCS2 on the i386, and to Linux on the i386
	 * and the Alpha.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists. Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>).
	 */
	if (!epp->ep_esch->u.elf_probe_func) {
		pos = ELFDEFNNAME(NO_ADDR);
	} else {
		vaddr_t startp = 0;

		error = (*epp->ep_esch->u.elf_probe_func)(p, epp, eh, interp,
							  &startp);
		pos = (Elf_Addr)startp;
		if (error)
			goto bad;
	}
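	/*
	 * pos now holds the interpreter load address hint, if any, that
	 * the emulation's probe function supplied; it is handed to
	 * elf_load_file() below when an interpreter is present.
	 */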

	/*
	 * Load all the necessary sections
	 */
	for (i = nload = 0; i < eh->e_phnum; i++) {
		Elf_Addr  addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * XXX
			 * Can handle only 2 sections: text and data
			 */
			if (nload++ == 2)
				goto bad;
			ELFNAME(load_psection)(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, VMCMD_FIXED);

			/*
			 * Decide whether it's text or data by looking
			 * at the entry point.
			 */
			if (eh->e_entry >= addr &&
			    eh->e_entry < (addr + size)) {
				epp->ep_taddr = addr;
				epp->ep_tsize = size;
				if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				}
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp[0]) {
		struct elf_args *ap;
		int i = epp->ep_vmcmds.evs_used;
		u_long interp_offset;

		MALLOC(ap, struct elf_args *, sizeof(struct elf_args),
		    M_TEMP, M_WAITOK);
		if ((error = ELFNAME(load_file)(p, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, ap, &pos)) != 0) {
			FREE(ap, M_TEMP);
			goto bad;
		}
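		/*
		 * evs_used was sampled before elf_load_file() added its
		 * vmcmds, so evs_cmds[i] is the interpreter's base mapping;
		 * its address becomes AT_BASE for the new image.
		 */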
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[i].ev_addr;
		epp->ep_entry = ap->arg_interp + interp_offset;
		ap->arg_phaddr = phdr;

		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;

		epp->ep_emul_arg = ap;
	} else
		epp->ep_entry = eh->e_entry;

#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif
	FREE(interp, M_TEMP);
	free(ph, M_TEMP);
	return exec_elf_setup_stack(p, epp);

bad:
	if (interp)
		FREE(interp, M_TEMP);
	free(ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return ENOEXEC;
}

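/*
 * netbsd_signature():
 *
 * Scan the program headers for a PT_NOTE segment that carries the
 * NetBSD ELF note; return 0 if one is found, ENOEXEC otherwise.
 */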
int
ELFNAME2(netbsd,signature)(struct proc *p, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Phdr *ph;
	size_t phsize;
	int error;

	if (eh->e_phnum > MAXPHNUM)
		return ENOEXEC;

	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);
	error = exec_read_from(p, epp->ep_vp, eh->e_phoff, ph, phsize);
	if (error)
		goto out;

	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Phdr *ephp = &ph[i];
		Elf_Nhdr *np;

		if (ephp->p_type != PT_NOTE ||
		    ephp->p_filesz > 1024 ||
		    ephp->p_filesz < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		np = (Elf_Nhdr *)malloc(ephp->p_filesz, M_TEMP, M_WAITOK);
		error = exec_read_from(p, epp->ep_vp, ephp->p_offset, np,
		    ephp->p_filesz);
		if (error)
			goto next;

		if (np->n_type != ELF_NOTE_TYPE_NETBSD_TAG ||
		    np->n_namesz != ELF_NOTE_NETBSD_NAMESZ ||
		    np->n_descsz != ELF_NOTE_NETBSD_DESCSZ ||
		    memcmp((caddr_t)(np + 1), ELF_NOTE_NETBSD_NAME,
		    ELF_NOTE_NETBSD_NAMESZ))
			goto next;

		error = 0;
		free(np, M_TEMP);
		goto out;

	next:
		free(np, M_TEMP);
		continue;
	}

	error = ENOEXEC;
out:
	free(ph, M_TEMP);
	return (error);
}

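/*
 * netbsd_probe():
 *
 * Native probe function: accept the binary if it carries the NetBSD
 * note and leave the interpreter load address for the kernel to pick.
 */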
int
ELFNAME2(netbsd,probe)(struct proc *p, struct exec_package *epp,
    void *eh, char *itp, vaddr_t *pos)
{
	int error;

	if ((error = ELFNAME2(netbsd,signature)(p, epp, eh)) != 0)
		return error;
	*pos = ELFDEFNNAME(NO_ADDR);
	return 0;
}