/*	$NetBSD: exec_subr.c,v 1.52.2.2 2007/06/17 21:31:18 ad Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.52.2.2 2007/06/17 21:31:18 ad Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <uvm/uvm.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);
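
/*
 * The counters above are ordinary EVCNT_TYPE_MISC event counters
 * attached under the group name "vmcmd", so their totals should be
 * visible at runtime through the usual event-counter tools (e.g.
 * "vmstat -e").
 */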

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are held: a reference is taken on the vnode, if
 *	any, so it cannot be recycled while the command is pending.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd    *vcp;

	VMCMD_EVCNT_INCR(calls);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
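
/*
 * Illustrative sketch (assumed usage, not taken from this file):
 * executable-format loaders normally queue commands through the
 * NEW_VMCMD()/NEW_VMCMD2() wrapper macros from <sys/exec.h> rather
 * than calling new_vmcmd() directly, along the lines of:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, text_size,
 *	    text_addr, epp->ep_vp, text_offset,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 *
 * where text_size, text_addr and text_offset stand in for values the
 * loader computes from the executable's headers.
 */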

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = malloc(evsp->evs_cnt * sizeof(struct exec_vmcmd),
	    M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		free(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	free(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return(EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return(ENOMEM);
	VREF(vp);

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(&vp->v_interlock);
		vp->v_vflag |= VV_MAPPED;
		vp->v_iflag |= VI_MAPPED;
		mutex_exit(&vp->v_interlock);
		VOP_UNLOCK(vp, 0);
	}

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * do the map
	 */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
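
/*
 * Worked example of the alignment checks above (numbers are
 * illustrative): with 4KB pages, a command with ev_len == 0x2345
 * fails the (ev_len & PAGE_MASK) test (0x2345 & 0xfff == 0x345), so
 * the mapping is refused with EINVAL.  Loaders with segments that
 * cannot be demand-paged at page granularity are expected to fall
 * back on vmcmd_map_readvn() below, which accepts arbitrary
 * alignment by copying the data instead of mapping it.
 */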

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller may want it mapped
	 * with weaker protection, so we now have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return (error);
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return (error);
	}

	return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must first be allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	return error;
}
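
/*
 * Illustrative use (a sketch, not taken from this file): zero-fill
 * commands are what a loader would typically queue for a BSS-style
 * segment, e.g.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bss_size, bss_addr,
 *	    NULL, 0, VM_PROT_READ | VM_PROT_WRITE);
 *
 * where bss_size and bss_addr are hypothetical values taken from the
 * executable's headers.  exec_setup_stack() below queues the same
 * command for the stack region.
 */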

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    0, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
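
/*
 * Typical call pattern (a sketch; "ehdr" and "error" are hypothetical
 * locals): an executable-format parser reads its header from the
 * front of the image and treats a short read as "not this format":
 *
 *	Elf_Ehdr ehdr;
 *	int error;
 *
 *	if ((error = exec_read_from(l, epp->ep_vp, 0, &ehdr,
 *	    sizeof(ehdr))) != 0)
 *		return error;
 */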

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	u_long max_stack_size;
	u_long access_linear_min, access_size;
	u_long noaccess_linear_min, noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}
	epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr,
		max_stack_size);
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);
	if (noaccess_size > 0) {
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE);
	}
	KASSERT(access_size > 0);
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE);

	return 0;
}
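
/*
 * Resulting layout on a machine whose stack grows down (a sketch;
 * on a grow-up machine STACK_GROW()/STACK_ALLOC() reverse the
 * directions):
 *
 *	ep_minsaddr (== USRSTACK)    <- high address: stack base
 *	   ... accessible region, ep_ssize bytes (RW, RLIMIT_STACK)
 *	access_linear_min
 *	   ... inaccessible region, noaccess_size bytes (VM_PROT_NONE)
 *	ep_maxsaddr                  <- low address: hard stack limit
 */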