/*	$NetBSD: exec_subr.c,v 1.72 2015/09/26 16:12:24 maxv Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.72 2015/09/26 16:12:24 maxv Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#if defined(PAX_ASLR) || defined(PAX_MPROTECT)
#include <sys/pax.h>
#endif /* PAX_ASLR || PAX_MPROTECT */

#include <uvm/uvm_extern.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

#ifdef DEBUG_STACK
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    vsize_t len, vaddr_t addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	VMCMD_EVCNT_INCR(calls);
	KASSERT(proc != vmcmd_map_pagedvn || (vp->v_iflag & VI_TEXT));
	KASSERT(vp == NULL || vp->v_usecount > 0);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}

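/*
 * Usage sketch (illustrative): executable format loaders normally queue
 * commands through the NEW_VMCMD() and NEW_VMCMD2() wrappers from
 * <sys/exec.h>, which expand to new_vmcmd() without and with flags
 * respectively:
 *
 *	NEW_VMCMD(evsp, proc, len, addr, vp, offset, prot);
 *	NEW_VMCMD2(evsp, proc, len, addr, vp, offset, prot, flags);
 *
 * Nothing is mapped at this point; execve() later walks the completed
 * set and runs each command's ev_proc in order.
 */
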
void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}

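/*
 * Growth note (illustrative): a fresh set gets room for
 * EXEC_DEFAULT_VMCMD_SETSIZE commands and the capacity doubles each
 * time it fills, i.e. N, 2N, 4N, ...; the "extends" event counter
 * above records how often that happens.
 */
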
void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}

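/*
 * Usage sketch (hypothetical names and sizes): a demand-paged text
 * segment would typically be queued as
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn,
 *	    round_page(textsize), epp->ep_taddr, epp->ep_vp, 0,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 *
 * ev_addr, ev_offset and ev_len must all be page-aligned, as checked
 * above, because the pages are mapped directly from the vnode's pager.
 */
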
/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.  however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    maxprot, true);
		if (error)
			return error;
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    prot, false);
		if (error)
			return error;
	}

	return 0;
}

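/*
 * Usage sketch (hypothetical names and sizes): an impure, non-demand-
 * paged image whose file offsets are not page-aligned could queue its
 * data segment as
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, datasize,
 *	    epp->ep_daddr, epp->ep_vp, data_offset,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * since the contents are copied in with vn_rdwr() rather than mapped
 * from the vnode, no alignment constraints apply.
 */
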
/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}

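/*
 * Usage sketch (hypothetical names and sizes): a BSS region directly
 * above the data segment would typically be queued as
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsssize,
 *	    epp->ep_daddr + datasize, NULL, 0,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * exec_setup_stack() below uses the same proc, with VMCMD_STACK set,
 * to create the stack mappings.
 */
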
/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    0, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}

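/*
 * Usage sketch (illustrative): the ELF loader reads the file header
 * roughly as
 *
 *	Elf_Ehdr eh;
 *	if ((error = exec_read_from(l, epp->ep_vp, 0, &eh,
 *	    sizeof(eh))) != 0)
 *		return error;
 *
 * A short read is reported as ENOEXEC rather than as a partial count.
 */
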
/*
 * exec_setup_stack(): Set up the stack segment for an elf
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	vsize_t max_stack_size;
	vaddr_t access_linear_min;
	vsize_t access_size;
	vaddr_t noaccess_linear_min;
	vsize_t noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif
#ifndef	MAXSSIZ32
#define MAXSSIZ32	(MAXSSIZ >> 2)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ32;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}

	DPRINTF(("ep_minsaddr=%llx max_stack_size=%llx\n",
	    (unsigned long long)epp->ep_minsaddr,
	    (unsigned long long)max_stack_size));

	epp->ep_ssize = MIN(l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    max_stack_size);

#ifdef PAX_ASLR
	pax_aslr_stack(epp, &max_stack_size);
#endif /* PAX_ASLR */

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (vaddr_t)STACK_GROW(epp->ep_minsaddr,
	    max_stack_size);

	DPRINTF(("ep_ssize=%llx ep_maxsaddr=%llx\n",
	    (unsigned long long)epp->ep_ssize,
	    (unsigned long long)epp->ep_maxsaddr));

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (vaddr_t)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);

	DPRINTF(("access_size=%llx, access_linear_min=%llx, "
	    "noaccess_size=%llx, noaccess_linear_min=%llx\n",
	    (unsigned long long)access_size,
	    (unsigned long long)access_linear_min,
	    (unsigned long long)noaccess_size,
	    (unsigned long long)noaccess_linear_min));

	if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK);
	}
	KASSERT(access_size > 0 && access_size <= MAXSSIZ);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}
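
/*
 * Worked example (hypothetical numbers, stack growing down): with
 * MAXSSIZ at 64MB and RLIMIT_STACK at a common 8MB default, ep_ssize
 * becomes 8MB, so the accessible window is the 8MB just below
 * USRSTACK (read/write) and the remaining 56MB below that is mapped
 * VM_PROT_NONE.  Raising the limit later only needs to re-protect
 * part of the no-access region instead of creating a new mapping.
 */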