/*	$NetBSD: exec_subr.c,v 1.84 2020/04/13 19:23:18 ad Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.84 2020/04/13 19:23:18 ad Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>
#include <sys/pax.h>

#include <uvm/uvm_extern.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

#ifdef DEBUG_STACK
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif

unsigned int user_stack_guard_size = 1024 * 1024;
unsigned int user_thread_stack_guard_size = 64 * 1024;

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    vsize_t len, vaddr_t addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	VMCMD_EVCNT_INCR(calls);
	KASSERT(proc != vmcmd_map_pagedvn || (vp->v_iflag & VI_TEXT));
	KASSERT(vp == NULL || vrefcnt(vp) > 0);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
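
/*
 * Usage sketch (illustrative only; the segment names and sizes are
 * hypothetical): exec format handlers normally queue vmcmds through the
 * NEW_VMCMD()/NEW_VMCMD2() wrappers from <sys/exec.h>, which expand to
 * calls of new_vmcmd() above.  A handler mapping a demand-paged text
 * segment plus a zero-filled bss might do:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, text_size,
 *	    text_addr, epp->ep_vp, text_offset,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bss_size,
 *	    bss_addr, NULL, 0, VM_PROT_READ | VM_PROT_WRITE);
 *
 * The queued commands are only run later, when execve() builds the new
 * address space; kill_vmcmds() drops the vnode references taken here if
 * the exec is abandoned first.
 */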

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

static int
vmcmd_get_prot(struct lwp *l, const struct exec_vmcmd *cmd, vm_prot_t *prot,
    vm_prot_t *maxprot)
{

	*prot = cmd->ev_prot;
	*maxprot = PAX_MPROTECT_MAXPROTECT(l, *prot, 0, UVM_PROT_ALL);

	if ((*prot & *maxprot) != *prot)
		return EACCES;
	return PAX_MPROTECT_VALIDATE(l, *prot);
}

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
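
/*
 * Worked example of the alignment checks above, assuming a hypothetical
 * 4 KiB page size (PAGE_MASK == 0xfff): an ev_offset of 0x12345 fails
 * (0x12345 & 0xfff == 0x345), while 0x12000 passes.  Exec formats that
 * cannot guarantee page-aligned offsets and lengths typically fall back
 * to vmcmd_map_readvn() below, which copies the data instead of mapping
 * the vnode and does its own page rounding.
 */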

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}
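
/*
 * Worked example of the page rounding above, assuming a hypothetical
 * 4 KiB page size: for ev_addr 0x20c4, ev_offset 0x10c4 and ev_len 0x500,
 * trunc_page(0x20c4) is 0x2000, so diff is 0xc4.  The command is rewritten
 * to ev_addr 0x2000, ev_offset 0x1000, ev_len 0x5c4, and round_page(0x5c4)
 * maps one 0x1000-byte page-aligned region that still places the original
 * bytes at the originally requested address.
 */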

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return error;
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return error;
	}

	return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}
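
/*
 * Worked example of the VMCMD_STACK accounting above, assuming a
 * hypothetical 4 KiB page size: a stack command with ev_len 0x2100 rounds
 * to 0x3000, so atop() adds three pages to vm_issize.  Zero-fill commands
 * queued without VMCMD_STACK (e.g. bss, or the VM_PROT_NONE stack guard
 * set up in exec_setup_stack() below) leave vm_issize untouched.
 */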

/*
 * exec_read():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read(struct lwp *l, struct vnode *vp, u_long off, void *bf, size_t size,
    int ioflg)
{
	int error;
	size_t resid;

	KASSERT((ioflg & IO_NODELOCKED) == 0 || VOP_ISLOCKED(vp) != LK_NONE);

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    ioflg, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
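
/*
 * Usage sketch (illustrative only; the header structure is hypothetical):
 * exec format probes read their headers from the front of the image with
 * something like
 *
 *	struct some_exec_header hdr;
 *	error = exec_read(l, epp->ep_vp, 0, &hdr, sizeof(hdr),
 *	    IO_NODELOCKED);
 *
 * Per the KASSERT above, IO_NODELOCKED may only be passed when the caller
 * already holds the vnode lock; otherwise pass 0 and let vn_rdwr() take
 * the lock itself.
 */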

/*
 * exec_setup_stack(): Set up the stack segment for an elf
 * executable.
 *
 * Note that the ep_ssize parameter must be set to be the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	vsize_t max_stack_size;
	vaddr_t access_linear_min;
	vsize_t access_size;
	vaddr_t noaccess_linear_min;
	vsize_t noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif
#ifndef MAXSSIZ32
#define MAXSSIZ32	(MAXSSIZ >> 2)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ32;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}

	DPRINTF(("ep_minsaddr=%#jx max_stack_size=%#jx\n",
	    (uintmax_t)epp->ep_minsaddr, (uintmax_t)max_stack_size));

	pax_aslr_stack(epp, &max_stack_size);

	DPRINTF(("[RLIMIT_STACK].lim_cur=%#jx max_stack_size=%#jx\n",
	    (uintmax_t)l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    (uintmax_t)max_stack_size));
	epp->ep_ssize = MIN(l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    max_stack_size);

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (vaddr_t)STACK_GROW(epp->ep_minsaddr,
	    max_stack_size);

	DPRINTF(("ep_ssize=%#jx ep_minsaddr=%#jx ep_maxsaddr=%#jx\n",
	    (uintmax_t)epp->ep_ssize, (uintmax_t)epp->ep_minsaddr,
	    (uintmax_t)epp->ep_maxsaddr));

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (vaddr_t)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);

	DPRINTF(("access_size=%#jx, access_linear_min=%#jx, "
	    "noaccess_size=%#jx, noaccess_linear_min=%#jx\n",
	    (uintmax_t)access_size, (uintmax_t)access_linear_min,
	    (uintmax_t)noaccess_size, (uintmax_t)noaccess_linear_min));

	if (user_stack_guard_size > 0) {
#ifdef __MACHINE_STACK_GROWS_UP
		vsize_t guard_size = MIN(VM_MAXUSER_ADDRESS - epp->ep_maxsaddr, user_stack_guard_size);
		if (guard_size > 0)
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, guard_size,
			    epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE);
#else
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, user_stack_guard_size,
		    epp->ep_maxsaddr - user_stack_guard_size, NULL, 0, VM_PROT_NONE);
#endif
	}
	if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK);
	}
	KASSERT(access_size > 0 && access_size <= MAXSSIZ);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}
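
/*
 * Worked example of the layout set up above, for a hypothetical
 * downward-growing 64-bit stack with USRSTACK at 0x7f8000000000,
 * MAXSSIZ of 32 MiB and an RLIMIT_STACK soft limit of 8 MiB (ignoring
 * the ASLR shift applied by pax_aslr_stack()):
 *
 *	ep_minsaddr = 0x7f8000000000                      (stack top)
 *	ep_maxsaddr = 0x7f7ffe000000                      (top - 32 MiB)
 *	access      = [0x7f7fff800000, 0x7f8000000000)     8 MiB, RW
 *	noaccess    = [0x7f7ffe000000, 0x7f7fff800000)    24 MiB, PROT_NONE
 *	guard       = [0x7f7ffdf00000, 0x7f7ffe000000)     1 MiB, PROT_NONE
 *
 * The noaccess piece reserves the rest of the potential stack so it can
 * be made accessible if RLIMIT_STACK is later raised; the guard below
 * ep_maxsaddr catches runaway stack growth.
 */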