/*	$NetBSD: exec_subr.c,v 1.89 2024/12/06 16:18:41 riastradh Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.89 2024/12/06 16:18:41 riastradh Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/types.h>

#include <sys/device.h>
#include <sys/exec.h>
#include <sys/filedesc.h>
#include <sys/kmem.h>
#include <sys/mman.h>
#include <sys/pax.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

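/*
 * Event counters for vmcmd set usage; attached statically so they appear
 * in the system event counter list (e.g. vmstat -e).
 */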
#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

#ifdef DEBUG_STACK
#define DPRINTF(a) uprintf a
#else
#define DPRINTF(a)
#endif

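/*
 * Size, in bytes, of the inaccessible guard region placed beyond the main
 * user stack and beyond user thread stacks, respectively.
 */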
unsigned int user_stack_guard_size = 1024 * 1024;
unsigned int user_thread_stack_guard_size = 64 * 1024;

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    vsize_t len, vaddr_t addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	VMCMD_EVCNT_INCR(calls);
	KASSERT(proc != vmcmd_map_pagedvn || (vp->v_iflag & VI_TEXT));
	KASSERT(vp == NULL || vrefcnt(vp) > 0);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}

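/*
 * vmcmdset_extend():
 *	grow the vmcmd set: double its capacity if it already has entries,
 *	otherwise allocate the default number of entries.
 */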
void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}

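/*
 * kill_vmcmds():
 *	release the vnode references held by the vmcmds in the set, free
 *	the command array, and reset the set to empty.
 */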
void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

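/*
 * vmcmd_get_prot():
 *	compute the initial and maximum protection for a vmcmd from its
 *	ev_prot field, applying any PaX MPROTECT restrictions.
 */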
static int
vmcmd_get_prot(struct lwp *l, const struct exec_vmcmd *cmd, vm_prot_t *prot,
    vm_prot_t *maxprot)
{
	vm_prot_t extraprot = PROT_MPROTECT_EXTRACT(cmd->ev_prot);

	*prot = cmd->ev_prot & UVM_PROT_ALL;
	*maxprot = PAX_MPROTECT_MAXPROTECT(l, *prot, extraprot, UVM_PROT_ALL);

	if ((*prot & *maxprot) != *prot)
		return EACCES;
	return PAX_MPROTECT_VALIDATE(l, *prot);
}

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}

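/*
 * vmcmd_readvn():
 *	read the vnode contents into a region that has already been mapped
 *	writable, then adjust the mapping's protection to what the vmcmd
 *	requested.
 */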
int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write the process, make sure the pages are synched
	 * with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return error;
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return error;
	}

	return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must first be allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	if ((error = vmcmd_get_prot(l, cmd, &prot, &maxprot)) != 0)
		return error;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}

/*
 * exec_read():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read(struct lwp *l, struct vnode *vp, u_long off, void *bf, size_t size,
    int ioflg)
{
	int error;
	size_t resid;

	KASSERT((ioflg & IO_NODELOCKED) == 0 || VOP_ISLOCKED(vp) != LK_NONE);

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    ioflg, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that the ep_ssize parameter must be set to the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	vsize_t max_stack_size;
	vaddr_t access_linear_min;
	vsize_t access_size;
	vaddr_t noaccess_linear_min;
	vsize_t noaccess_size;

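/*
 * Fall back to generic values for the 32-bit (EXEC_32) stack top and
 * maximum stack size if machine-dependent code does not define them.
 */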
#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif
#ifndef MAXSSIZ32
#define MAXSSIZ32	(MAXSSIZ >> 2)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ32;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}

	DPRINTF(("ep_minsaddr=%#jx max_stack_size=%#jx\n",
	    (uintmax_t)epp->ep_minsaddr, (uintmax_t)max_stack_size));

	pax_aslr_stack(epp, &max_stack_size);

	DPRINTF(("[RLIMIT_STACK].lim_cur=%#jx max_stack_size=%#jx\n",
	    (uintmax_t)l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    (uintmax_t)max_stack_size));
	epp->ep_ssize = MIN(l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur,
	    max_stack_size);

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (vaddr_t)STACK_GROW(epp->ep_minsaddr,
	    max_stack_size);

	DPRINTF(("ep_ssize=%#jx ep_minsaddr=%#jx ep_maxsaddr=%#jx\n",
	    (uintmax_t)epp->ep_ssize, (uintmax_t)epp->ep_minsaddr,
	    (uintmax_t)epp->ep_maxsaddr));

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (vaddr_t)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (vaddr_t)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);

	DPRINTF(("access_size=%#jx, access_linear_min=%#jx, "
	    "noaccess_size=%#jx, noaccess_linear_min=%#jx\n",
	    (uintmax_t)access_size, (uintmax_t)access_linear_min,
	    (uintmax_t)noaccess_size, (uintmax_t)noaccess_linear_min));

	if (user_stack_guard_size > 0) {
#ifdef __MACHINE_STACK_GROWS_UP
		vsize_t guard_size = MIN(VM_MAXUSER_ADDRESS - epp->ep_maxsaddr, user_stack_guard_size);
		if (guard_size > 0)
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, guard_size,
			    epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE);
#else
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, user_stack_guard_size,
		    epp->ep_maxsaddr - user_stack_guard_size, NULL, 0, VM_PROT_NONE);
#endif
	}
	if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0,
		    VM_PROT_NONE | PROT_MPROTECT(VM_PROT_READ | VM_PROT_WRITE),
		    VMCMD_STACK);
	}
	KASSERT(access_size > 0);
	KASSERT(access_size <= MAXSSIZ);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}
    476