/*	$NetBSD: exec_subr.c,v 1.37.2.6 2005/03/04 16:51:58 skrll Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.37.2.6 2005/03/04 16:51:58 skrll Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>
#include <sys/resourcevar.h>

#include <uvm/uvm.h>

/*
 * XXX cgd 960926: this module should collect simple statistics
 * (calls, extends, kills).
 */

/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects referenced by
 *	the vmcmd are held: currently this means taking a reference
 *	to the backing vnode, released later by kill_vmcmds().
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct lwp * l, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd    *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
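
/*
 * Illustrative sketch (not part of the original file): an exec format
 * handler typically queues commands through the NEW_VMCMD() macro
 * while parsing headers.  The names text_size, text_addr and text_off
 * below are hypothetical:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, text_size,
 *	    text_addr, epp->ep_vp, text_off,
 *	    VM_PROT_READ|VM_PROT_EXECUTE);
 *
 * The vref() above pairs with the vrele() in kill_vmcmds(), so the
 * vnode stays valid until the command set is destroyed.
 */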

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = malloc(evsp->evs_cnt * sizeof(struct exec_vmcmd),
	    M_EXEC, M_WAITOK);

	/*
	 * copy the old entries into the new array, free the old array
	 * (if any), and record the new one
	 */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		free(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}
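
/*
 * Growth is geometric: an empty set is first sized to
 * EXEC_DEFAULT_VMCMD_SETSIZE entries and each later extension doubles
 * the count, so queueing n commands costs O(n) amortized entry copies.
 */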

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULL)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	free(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	struct proc *p = l->l_proc;
	int error;

	KASSERT(cmd->ev_vp->v_flag & VTEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return(EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return(ENOMEM);
	VREF(cmd->ev_vp);

	/*
	 * do the map
	 */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
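
/*
 * Illustrative note (hypothetical numbers, 4KB pages): demand paging
 * requires page alignment, so a segment whose file offset is, say,
 * 0x1234 cannot be handled here and the format handler must fall back
 * on vmcmd_map_readvn().  A command this routine accepts looks like:
 *
 *	cmd->ev_addr   = 0x00010000;	(page-aligned address)
 *	cmd->ev_offset = 0x00002000;	(page-aligned file offset)
 *	cmd->ev_len    = 0x00004000;	(whole pages)
 */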

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(l, cmd);
}
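
/*
 * Worked example of the alignment fixup above (hypothetical numbers,
 * 4KB pages): for ev_addr = 0x10123 and ev_len = 0x200, diff = 0x123.
 * The region mapped is [0x10000, 0x11000), and ev_offset is slid back
 * by 0x123 so that vn_rdwr() in vmcmd_readvn() reads 0x323 bytes into
 * memory starting at 0x10000, still placing the original bytes at
 * their intended addresses.
 */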

int
vmcmd_readvn(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, l);
	if (error)
		return error;

#ifdef PMAP_NEED_PROCWR
	/*
	 * we wrote into the process's address space, so make sure the
	 * pages are synched with the instruction cache.
	 */
	if (cmd->ev_prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {

		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.   however, the caller wants it mapped
		 * with less than that, so now we have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */

		return uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				cmd->ev_prot, FALSE);
	}
	return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	return error;
}
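
/*
 * Illustrative use (hypothetical names): format handlers queue this
 * command for bss, with a NULL vnode since no file backs the region:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bss_size, bss_addr,
 *	    NULL, 0, VM_PROT_READ|VM_PROT_WRITE);
 */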

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct lwp *l, struct vnode *vp, u_long off, void *buf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
	    0, l->l_proc->p_ucred, &resid, NULL)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
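
/*
 * Illustrative use (sketch; eh and error are local to the caller):
 * an exec format handler reads the on-disk header and treats a short
 * read as "not executable":
 *
 *	Elf_Ehdr eh;
 *	if ((error = exec_read_from(l, epp->ep_vp, 0, &eh,
 *	    sizeof(eh))) != 0)
 *		return error;
 */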

/*
 * exec_setup_stack(): Set up the stack segment for an ELF
 * executable.
 *
 * Note that ep_ssize is set here to the current stack limit; execve()
 * adjusts it later to yield the appropriate stack segment usage once
 * the argument length is calculated.
 *
 * This function returns an int for uniformity with other (future) formats'
 * stack setup functions, which might have errors to return.
 */

int
exec_setup_stack(struct lwp *l, struct exec_package *epp)
{
	u_long max_stack_size;
	u_long access_linear_min, access_size;
	u_long noaccess_linear_min, noaccess_size;

#ifndef	USRSTACK32
#define USRSTACK32	(0x00000000ffffffffL&~PGOFSET)
#endif

	if (epp->ep_flags & EXEC_32) {
		epp->ep_minsaddr = USRSTACK32;
		max_stack_size = MAXSSIZ;
	} else {
		epp->ep_minsaddr = USRSTACK;
		max_stack_size = MAXSSIZ;
	}
	epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr,
		max_stack_size);
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary.
	 */
	access_size = epp->ep_ssize;
	access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size);
	noaccess_size = max_stack_size - access_size;
	noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr,
	    access_size), noaccess_size);
	if (noaccess_size > 0) {
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
		    noaccess_linear_min, NULL, 0, VM_PROT_NONE);
	}
	KASSERT(access_size > 0);
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
	    access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE);

	return 0;
}
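
/*
 * Illustrative layout (hypothetical numbers): on a port with a
 * top-down stack, USRSTACK = 0x80000000, MAXSSIZ = 32MB and a 2MB
 * RLIMIT_STACK soft limit, the two vmcmds queued above cover:
 *
 *	[0x7fe00000, 0x80000000)   2MB, read/write  (accessible stack)
 *	[0x7e000000, 0x7fe00000)  30MB, VM_PROT_NONE (inaccessible)
 *
 * Raising the soft limit later then only requires changing protection
 * on part of the VM_PROT_NONE region, not establishing a new mapping.
 */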