/*	$NetBSD: exec_subr.c,v 1.91 2024/12/06 16:48:13 riastradh Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.91 2024/12/06 16:48:13 riastradh Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/types.h>

#include <sys/device.h>
#include <sys/exec.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/pax.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sdt.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#define	VMCMD_EVCNT_DECL(name)						\
static struct evcnt vmcmd_ev_##name =					\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);		\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)						\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

#ifdef DEBUG_STACK
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif
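/*
 * Sizes (in bytes) of the VM_PROT_NONE guard region placed just beyond
 * the end of the main process stack by exec_setup_stack() below, and of
 * the analogous guard presumably applied to per-thread stacks allocated
 * outside this file.
 */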
unsigned int user_stack_guard_size = 1024 * 1024;
unsigned int user_thread_stack_guard_size = 64 * 1024;

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    vsize_t len, vaddr_t addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
        struct exec_vmcmd *vcp;

        VMCMD_EVCNT_INCR(calls);
        KASSERT(proc != vmcmd_map_pagedvn || (vp->v_iflag & VI_TEXT));
        KASSERT(vp == NULL || vrefcnt(vp) > 0);

        if (evsp->evs_used >= evsp->evs_cnt)
                vmcmdset_extend(evsp);
        vcp = &evsp->evs_cmds[evsp->evs_used++];
        vcp->ev_proc = proc;
        vcp->ev_len = len;
        vcp->ev_addr = addr;
        if ((vcp->ev_vp = vp) != NULL)
                vref(vp);
        vcp->ev_offset = offset;
        vcp->ev_prot = prot;
        vcp->ev_flags = flags;
}

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
        struct exec_vmcmd *nvcp;
        u_int ocnt;

#ifdef DIAGNOSTIC
        if (evsp->evs_used < evsp->evs_cnt)
                panic("vmcmdset_extend: not necessary");
#endif

        /* figure out number of entries in new set */
        if ((ocnt = evsp->evs_cnt) != 0) {
                evsp->evs_cnt += ocnt;
                VMCMD_EVCNT_INCR(extends);
        } else
                evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

        /* allocate it */
        nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

        /* free the old struct, if there was one, and record the new one */
        if (ocnt) {
                memcpy(nvcp, evsp->evs_cmds,
                    (ocnt * sizeof(struct exec_vmcmd)));
                kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
        }
        evsp->evs_cmds = nvcp;
}

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
        struct exec_vmcmd *vcp;
        u_int i;

        VMCMD_EVCNT_INCR(kills);

        if (evsp->evs_cnt == 0)
                return;

        for (i = 0; i < evsp->evs_used; i++) {
                vcp = &evsp->evs_cmds[i];
                if (vcp->ev_vp != NULL)
                        vrele(vcp->ev_vp);
        }
        kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
        evsp->evs_used = evsp->evs_cnt = 0;
}
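/*
 * Illustrative sketch (simplified; text_len, text_addr, text_offset,
 * bss_len and bss_addr are hypothetical locals of a format handler, not
 * part of this file): an exec format's makecmds hook queues its mappings
 * through the NEW_VMCMD()/NEW_VMCMD2() macros, which call new_vmcmd()
 * above, e.g. a demand-paged text segment followed by zero-filled bss:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, text_len, text_addr,
 *	    epp->ep_vp, text_offset, VM_PROT_READ | VM_PROT_EXECUTE);
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bss_len, bss_addr,
 *	    NULL, 0, VM_PROT_READ | VM_PROT_WRITE);
 *
 * execve() later walks the set and runs each command in order;
 * kill_vmcmds() releases the vnode references taken here.
 */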
/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

static int
vmcmd_get_prot(struct lwp *l, const struct exec_vmcmd *cmd, vm_prot_t *prot,
    vm_prot_t *maxprot)
{
        vm_prot_t extraprot = PROT_MPROTECT_EXTRACT(cmd->ev_prot);

        *prot = cmd->ev_prot & UVM_PROT_ALL;
        *maxprot = PAX_MPROTECT_MAXPROTECT(l, *prot, extraprot, UVM_PROT_ALL);

        if ((*prot & *maxprot) != *prot)
                return SET_ERROR(EACCES);
        return PAX_MPROTECT_VALIDATE(l, *prot);
}

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct uvm_object *uobj;
        struct vnode *vp = cmd->ev_vp;
        struct proc *p = l->l_proc;
        int error;
        vm_prot_t prot, maxprot;

        KASSERT(vp->v_iflag & VI_TEXT);

        /*
         * map the vnode in using uvm_map.
         */

        if (cmd->ev_len == 0)
                return 0;
        if (cmd->ev_offset & PAGE_MASK)
                return SET_ERROR(EINVAL);
        if (cmd->ev_addr & PAGE_MASK)
                return SET_ERROR(EINVAL);
        if (cmd->ev_len & PAGE_MASK)
                return SET_ERROR(EINVAL);

        if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
                return error;

        /*
         * check the file system's opinion about mmapping the file
         */

        error = VOP_MMAP(vp, prot, l->l_cred);
        if (error)
                return error;

        if ((vp->v_vflag & VV_MAPPED) == 0) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                vp->v_vflag |= VV_MAPPED;
                VOP_UNLOCK(vp);
        }

        /*
         * do the map, reference the object for this map entry
         */
        uobj = &vp->v_uobj;
        vref(vp);

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
            uobj, cmd->ev_offset, 0,
            UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
            UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
        if (error) {
                uobj->pgops->pgo_detach(uobj);
        }
        return error;
}
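/*
 * Illustrative sketch (simplified; offset, addr, len and prot are
 * hypothetical locals of a format handler, not part of this file): a
 * loader typically queues vmcmd_map_pagedvn for the page-aligned,
 * demand-pageable portion of a file segment and falls back to
 * vmcmd_map_readvn for anything that is not page aligned, roughly:
 *
 *	if (((offset | addr | len) & PAGE_MASK) == 0)
 *		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, len, addr,
 *		    epp->ep_vp, offset, prot);
 *	else
 *		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, len, addr,
 *		    epp->ep_vp, offset, prot);
 */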
/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct proc *p = l->l_proc;
        int error;
        long diff;

        if (cmd->ev_len == 0)
                return 0;

        diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
        cmd->ev_addr -= diff;			/* required by uvm_map */
        cmd->ev_offset -= diff;
        cmd->ev_len += diff;

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
            UVM_ADV_NORMAL,
            UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

        if (error)
                return error;

        return vmcmd_readvn(l, cmd);
}

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct proc *p = l->l_proc;
        int error;
        vm_prot_t prot, maxprot;

        error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
            cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
            l->l_cred, NULL, l);
        if (error)
                return error;

        if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
                return error;

#ifdef PMAP_NEED_PROCWR
        /*
         * we had to write the process, make sure the pages are synched
         * with the instruction cache.
         */
        if (prot & VM_PROT_EXECUTE)
                pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

        /*
         * we had to map in the area at PROT_ALL so that vn_rdwr()
         * could write to it.  however, the caller seems to want
         * it mapped read-only, so now we are going to have to call
         * uvm_map_protect() to fix up the protection.  ICK.
         */
        if (maxprot != VM_PROT_ALL) {
                error = uvm_map_protect(&p->p_vmspace->vm_map,
                    trunc_page(cmd->ev_addr),
                    round_page(cmd->ev_addr + cmd->ev_len),
                    maxprot, true);
                if (error)
                        return error;
        }

        if (prot != maxprot) {
                error = uvm_map_protect(&p->p_vmspace->vm_map,
                    trunc_page(cmd->ev_addr),
                    round_page(cmd->ev_addr + cmd->ev_len),
                    prot, false);
                if (error)
                        return error;
        }

        return 0;
}
/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
        struct proc *p = l->l_proc;
        int error;
        long diff;
        vm_prot_t prot, maxprot;

        diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
        cmd->ev_addr -= diff;			/* required by uvm_map */
        cmd->ev_len += diff;

        if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
                return error;

        error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
            round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
            UVM_ADV_NORMAL,
            UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
        if (cmd->ev_flags & VMCMD_STACK)
                curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
        return error;
}

/*
 * exec_read():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read(struct lwp *l, struct vnode *vp, u_long off, void *bf, size_t size,
    int ioflg)
{
        int error;
        size_t resid;

        KASSERT((ioflg & IO_NODELOCKED) == 0 || VOP_ISLOCKED(vp) != LK_NONE);

        if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
            ioflg, l->l_cred, &resid, NULL)) != 0)
                return error;
        /*
         * See if we got all of it
         */
        if (resid != 0)
                return SET_ERROR(ENOEXEC);
        return 0;
}
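/*
 * Illustrative sketch (simplified; hdr is a hypothetical header object of
 * the caller, not part of this file): an exec format's probe or makecmds
 * hook typically pulls the on-disk header in with exec_read() before
 * deciding how to build its vmcmd set:
 *
 *	error = exec_read(l, epp->ep_vp, 0, &hdr, sizeof(hdr), 0);
 *	if (error)
 *		return error;
 *
 * Passing IO_NODELOCKED instead of 0 is only legal while the vnode is
 * locked, as the KASSERT above enforces.
 */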
/*
 * exec_setup_stack(): Set up the stack segment for an elf
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
        vsize_t max_stack_size;
        vaddr_t access_linear_min;
        vsize_t access_size;
        vaddr_t noaccess_linear_min;
        vsize_t noaccess_size;

#ifndef USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif
#ifndef MAXSSIZ32
#define MAXSSIZ32	(MAXSSIZ >> 2)
#endif

        if (epp->ep_flags & EXEC_32) {
                epp->ep_minsaddr = USRSTACK32;
                max_stack_size = MAXSSIZ32;
        } else {
                epp->ep_minsaddr = USRSTACK;
                max_stack_size = MAXSSIZ;
        }

        DPRINTF(("ep_minsaddr=%#jx max_stack_size=%#jx\n",
            (uintmax_t)epp->ep_minsaddr, (uintmax_t)max_stack_size));

        pax_aslr_stack(epp, &max_stack_size);

        DPRINTF(("[RLIMIT_STACK].lim_cur=%#jx max_stack_size=%#jx\n",
            (uintmax_t)l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
            (uintmax_t)max_stack_size));
        epp->ep_ssize = MIN(l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
            max_stack_size);

        l->l_proc->p_stackbase = epp->ep_minsaddr;

        epp->ep_maxsaddr = (vaddr_t)STACK_GROW(epp->ep_minsaddr,
            max_stack_size);

        DPRINTF(("ep_ssize=%#jx ep_minsaddr=%#jx ep_maxsaddr=%#jx\n",
            (uintmax_t)epp->ep_ssize, (uintmax_t)epp->ep_minsaddr,
            (uintmax_t)epp->ep_maxsaddr));

        /*
         * set up commands for stack.  note that this takes *two*, one to
         * map the part of the stack which we can access, and one to map
         * the part which we can't.
         *
         * arguably, it could be made into one, but that would require the
         * addition of another mapping proc, which is unnecessary
         */
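        /*
         * Illustrative layout of the mappings queued below for a
         * downward-growing stack, from low to high addresses:
         *
         *	+----------------------+  ep_maxsaddr - user_stack_guard_size
         *	| guard, VM_PROT_NONE  |
         *	+----------------------+  ep_maxsaddr
         *	| "noaccess" part:     |
         *	| PROT_NONE now, can   |
         *	| be mprotect'ed R/W   |
         *	| later if the stack   |
         *	| rlimit is raised     |
         *	+----------------------+  access_linear_min
         *	| accessible part:     |
         *	| R/W, ep_ssize bytes  |
         *	+----------------------+  ep_minsaddr (USRSTACK)
         *
         * With __MACHINE_STACK_GROWS_UP the picture is mirrored and the
         * guard is placed above ep_maxsaddr instead.
         */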
        access_size = epp->ep_ssize;
        access_linear_min = (vaddr_t)STACK_ALLOC(epp->ep_minsaddr, access_size);
        noaccess_size = max_stack_size - access_size;
        noaccess_linear_min = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
            access_size), noaccess_size);

        DPRINTF(("access_size=%#jx, access_linear_min=%#jx, "
            "noaccess_size=%#jx, noaccess_linear_min=%#jx\n",
            (uintmax_t)access_size, (uintmax_t)access_linear_min,
            (uintmax_t)noaccess_size, (uintmax_t)noaccess_linear_min));

        if (user_stack_guard_size > 0) {
#ifdef __MACHINE_STACK_GROWS_UP
                vsize_t guard_size = MIN(VM_MAXUSER_ADDRESS - epp->ep_maxsaddr,
                    user_stack_guard_size);
                if (guard_size > 0)
                        NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, guard_size,
                            epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE);
#else
                NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, user_stack_guard_size,
                    epp->ep_maxsaddr - user_stack_guard_size, NULL, 0,
                    VM_PROT_NONE);
#endif
        }
        if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
                NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
                    noaccess_linear_min, NULL, 0,
                    VM_PROT_NONE | PROT_MPROTECT(VM_PROT_READ | VM_PROT_WRITE),
                    VMCMD_STACK);
        }
        KASSERT(access_size > 0);
        KASSERT(access_size <= MAXSSIZ);
        NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
            access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
            VMCMD_STACK);

        return 0;
}