/*	$NetBSD: exec_subr.c,v 1.62 2009/03/29 01:02:50 mrg Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.62 2009/03/29 01:02:50 mrg Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>
#include <sys/device.h>

#ifdef PAX_MPROTECT
#include <sys/pax.h>
#endif /* PAX_MPROTECT */

#include <uvm/uvm.h>

#define	VMCMD_EVCNT_DECL(name)					\
static struct evcnt vmcmd_ev_##name =				\
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "vmcmd", #name);	\
EVCNT_ATTACH_STATIC(vmcmd_ev_##name)

#define	VMCMD_EVCNT_INCR(name)					\
    vmcmd_ev_##name.ev_count++

VMCMD_EVCNT_DECL(calls);
VMCMD_EVCNT_DECL(extends);
VMCMD_EVCNT_DECL(kills);

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd    *vcp;

	VMCMD_EVCNT_INCR(calls);

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
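
/*
 * A minimal usage sketch (segment names hypothetical): exec format
 * handlers normally reach new_vmcmd() through the NEW_VMCMD() and
 * NEW_VMCMD2() wrapper macros from <sys/exec.h>, e.g.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, data_len,
 *	    data_vaddr, epp->ep_vp, data_fileoff,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * The queued vmcmds are run later, in order, by execve() once the new
 * address space exists.
 */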

/*
 * vmcmdset_extend():
 *	grow a vmcmd set that has filled up: double its size, or give
 *	it the default size if it has never been allocated.
 */

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	if ((ocnt = evsp->evs_cnt) != 0) {
		evsp->evs_cnt += ocnt;
		VMCMD_EVCNT_INCR(extends);
	} else
		evsp->evs_cnt = EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = kmem_alloc(evsp->evs_cnt * sizeof(struct exec_vmcmd), KM_SLEEP);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		kmem_free(evsp->evs_cmds, ocnt * sizeof(struct exec_vmcmd));
	}
	evsp->evs_cmds = nvcp;
}
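
/*
 * For example (supposing EXEC_DEFAULT_VMCMD_SETSIZE were 4): the set
 * grows 0 -> 4 on first use, then doubles 4 -> 8 -> 16 on each later
 * extension, so n insertions cost O(n) amortized copying.
 */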

/*
 * kill_vmcmds():
 *	release the vnode references held by a vmcmd set and free its
 *	storage, leaving the set empty.
 */

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	VMCMD_EVCNT_INCR(kills);

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	kmem_free(evsp->evs_cmds, evsp->evs_cnt * sizeof(struct exec_vmcmd));
	evsp->evs_used = evsp->evs_cnt = 0;
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct vnode *vp = cmd->ev_vp;
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	KASSERT(vp->v_iflag & VI_TEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	/*
	 * check the file system's opinion about mmapping the file
	 */

	error = VOP_MMAP(vp, prot, l->l_cred);
	if (error)
		return error;

	if ((vp->v_vflag & VV_MAPPED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vp->v_vflag |= VV_MAPPED;
		VOP_UNLOCK(vp, 0);
	}

	/*
	 * do the map, reference the object for this map entry
	 */
	uobj = &vp->v_uobj;
	vref(vp);

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		/* on failure, drop the object reference taken above */
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
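
/*
 * Illustrative only (hypothetical segment variables): since this
 * handler insists on page-aligned addr/offset/len, format handlers
 * queue demand-paged segments pre-rounded, e.g.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn,
 *	    round_page(text_filesz), trunc_page(text_vaddr),
 *	    epp->ep_vp, trunc_page(text_fileoff),
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 */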

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}
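
/*
 * Worked example of the alignment fixup above, assuming PAGE_SIZE is
 * 0x1000: for an original ev_addr of 0x10234 and ev_len of 0x100,
 * diff is 0x234, so uvm_map() maps one page at 0x10000 and
 * vmcmd_readvn() reads 0x334 bytes starting 0x234 earlier in the
 * file; the original 0x100 bytes thereby land at 0x10234, their
 * intended address.
 */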

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	vm_prot_t prot, maxprot;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (void *)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    l->l_cred, NULL, l);
	if (error)
		return error;

	prot = cmd->ev_prot;
	maxprot = VM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write to the process' address space, so make sure
	 * the pages are synched with the instruction cache.
	 */
	if (prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	/*
	 * we had to map in the area at PROT_ALL so that vn_rdwr()
	 * could write to it.   however, the caller seems to want
	 * it mapped read-only, so now we are going to have to call
	 * uvm_map_protect() to fix up the protection.  ICK.
	 */
	if (maxprot != VM_PROT_ALL) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				maxprot, true);
		if (error)
			return error;
	}

	if (prot != maxprot) {
		error = uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				prot, false);
		if (error)
			return error;
	}

	return 0;
}
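
/*
 * Note the order of the two fixups above: the first call (set_max ==
 * true) lowers the maximum protection, which also clips the current
 * protection to the new maximum; the second then drops the current
 * protection to prot, which must lie within that maximum.
 */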

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 *	The address range must first be allocated, then protected
 *	appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}
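
/*
 * Illustrative only (hypothetical variables): zero-fill is what a
 * format handler queues for BSS, with a NULL vnode since no file
 * backs the region, e.g.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bss_len, bss_vaddr,
 *	    NULL, 0, VM_PROT_READ | VM_PROT_WRITE);
 */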

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *bf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, bf, size, off, UIO_SYSSPACE,
	    0, l->l_cred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
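
/*
 * Illustrative only (hypothetical locals): exec format probes use
 * this to pull headers in, treating a short read as ENOEXEC, e.g.
 *
 *	Elf_Ehdr eh;
 *	if ((error = exec_read_from(l, epp->ep_vp, 0, &eh,
 *	    sizeof(eh))) != 0)
 *		return error;
 */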

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that the ep_ssize parameter must be set to the current stack
 * limit; this is adjusted in the body of execve() to yield the
 * appropriate stack segment usage once the argument length is
 * calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions.  They might have errors to return.
 */
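
/*
 * A sketch of the resulting layout on a machine where the stack grows
 * down (the common case); sizes are illustrative:
 *
 *	ep_minsaddr ->	+------------------------+  (stack base, high addr)
 *			| accessible region,     |
 *			| ep_ssize bytes, RW     |
 *			+------------------------+
 *			| inaccessible region,   |
 *			| VM_PROT_NONE, usable   |
 *			| if the rlimit is raised|
 *			+------------------------+
 *	ep_maxsaddr ->	(stack limit, low addr)
 */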

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	u_long max_stack_size;
	u_long access_linear_min, access_size;
	u_long noaccess_linear_min, noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}

#ifdef PAX_ASLR
	pax_aslr_stack(l, epp, &max_stack_size);
#endif /* PAX_ASLR */

	l->l_proc->p_stackbase = epp->ep_minsaddr;

	epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr,
		max_stack_size);
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary.
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);
	if (noaccess_size > 0) {
		NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK);
	}
	KASSERT(access_size > 0);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
	    VMCMD_STACK);

	return 0;
}