/*	$NetBSD: exec_subr.c,v 1.61.12.1 2009/05/13 17:21:56 jym Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.61.12.1 2009/05/13 17:21:56 jym Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#if defined(PAX_MPROTECT) || defined(PAX_ASLR)
#include <sys/pax.h>
#endif /* PAX_MPROTECT || PAX_ASLR */

#include <uvm/uvm.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd    *vcp;

	VMCMD_EVCNT_INCR(calls);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
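
/*
 * Usage sketch: exec format handlers normally queue commands through the
 * NEW_VMCMD()/NEW_VMCMD2() macros from <sys/exec.h> rather than calling
 * new_vmcmd() directly; NEW_VMCMD2() is the form that also passes the
 * flags argument (see exec_setup_stack() below for a caller).  Nothing is
 * mapped or read at this point: execve() runs each queued ev_proc only
 * after the whole set has been assembled, and kill_vmcmds() drops the
 * vnode references again.
 */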

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}
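
/*
 * Sizing note: the command set starts at EXEC_DEFAULT_VMCMD_SETSIZE
 * entries and doubles each time it fills up, so repeated new_vmcmd()
 * calls cost only an amortized-constant number of copies per entry.
 */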

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return(EINVAL);

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp, 0);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
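
/*
 * Example (sketch, hypothetical values): an ELF-style loader would queue a
 * page-aligned, demand-paged text segment roughly as
 *
 *	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_pagedvn, seg_filesz,
 *	    seg_vaddr, epp->ep_vp, seg_off,
 *	    VM_PROT_READ|VM_PROT_EXECUTE, 0);
 *
 * where seg_filesz, seg_vaddr and seg_off satisfy the PAGE_MASK checks
 * above; segments that do not must fall back to vmcmd_map_readvn().
 */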

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write to the process's address space; make sure the
	 * pages are synched with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller may want it mapped
	 * with weaker protections, so call uvm_map_protect() to fix
	 * up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return (error);
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return (error);
	}

	return 0;
}
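
/*
 * Example (sketch, hypothetical values): a segment whose address, length
 * or file offset is not page aligned cannot be demand paged, so a loader
 * would queue it for copy-in instead, roughly as
 *
 *	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_readvn, data_size,
 *	    data_vaddr, epp->ep_vp, data_off,
 *	    VM_PROT_READ|VM_PROT_WRITE, 0);
 *
 * vmcmd_map_readvn() allocates the anonymous memory and vmcmd_readvn()
 * then does the vn_rdwr() copy and the protection fixup.
 */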

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must first be allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}
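
/*
 * Example (sketch, hypothetical values): zero-fill regions such as a
 * program's BSS are queued with no backing vnode, roughly as
 *
 *	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, bss_size,
 *	    bss_vaddr, NULL, 0, VM_PROT_READ|VM_PROT_WRITE, 0);
 *
 * exec_setup_stack() below uses the same handler with VMCMD_STACK set,
 * so that the pages are also counted in the process's vm_issize.
 */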

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    0, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
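
/*
 * Example (sketch): an exec format probe typically pulls the on-disk
 * header into a local structure before deciding whether it can handle
 * the image; with a hypothetical header variable hdr:
 *
 *	error = exec_read_from(l, epp->ep_vp, 0, &hdr, sizeof(hdr));
 *	if (error)
 *		return error;
 *
 * Note that a short read is reported as ENOEXEC, not as success.
 */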

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that the ep_ssize parameter must be set to the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	u_long max_stack_size;
	u_long access_linear_min, access_size;
	u_long noaccess_linear_min, noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}

#ifdef PAX_ASLR
	pax_aslr_stack(l, epp, &max_stack_size);
#endif /* PAX_ASLR */

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr,
		max_stack_size);
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary.
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);
	if (noaccess_size > 0) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK);
	}
	KASSERT(access_size > 0);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}
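
/*
 * Worked example (sketch): assuming a downward-growing stack with
 * USRSTACK = 0x80000000, MAXSSIZ = 64MB and an 8MB RLIMIT_STACK soft
 * limit, exec_setup_stack() yields
 *
 *	ep_minsaddr = 0x80000000	(stack top)
 *	ep_maxsaddr = 0x7c000000	(top - 64MB)
 *	accessible window:  0x7f800000 - 0x80000000  (8MB, read/write)
 *	PROT_NONE  window:  0x7c000000 - 0x7f800000  (56MB)
 *
 * so the stack can later grow into the PROT_NONE region, up to MAXSSIZ,
 * as the resource limit is raised.
 */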
    424