/*	$NetBSD: exec_subr.c,v 1.20 2000/06/27 17:41:12 mrg Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>

#include <uvm/uvm.h>

/*
 * XXX cgd 960926: this module should collect simple statistics
 * (calls, extends, kills).
 */
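
/*
 * A minimal sketch of what such counters could look like (hypothetical,
 * not part of this revision):
 *
 *	struct exec_vmcmd_stats {
 *		u_long	evs_calls;	number of vmcmds executed
 *		u_long	evs_extends;	number of vmcmdset_extend() calls
 *		u_long	evs_kills;	number of kill_vmcmds() calls
 *	};
 */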

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(evsp, proc, len, addr, vp, offset, prot)
	struct	exec_vmcmd_set *evsp;
	int	(*proc) __P((struct proc * p, struct exec_vmcmd *));
	u_long	len;
	u_long	addr;
	struct	vnode *vp;
	u_long	offset;
	u_int	prot;
{
	struct exec_vmcmd    *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
}
#endif /* DEBUG */
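
/*
 * Note (informational): as the comment above says, when DEBUG is not
 * defined new_vmcmd() is provided as a macro by <sys/exec.h> and is
 * expanded inline; the out-of-line function form here is compiled in
 * only for debugging.  Callers use the NEW_VMCMD interface either way.
 */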

void
vmcmdset_extend(evsp)
	struct	exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	MALLOC(nvcp, struct exec_vmcmd *,
	    (evsp->evs_cnt * sizeof(struct exec_vmcmd)), M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds, (ocnt * sizeof(struct exec_vmcmd)));
		FREE(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}
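
/*
 * kill_vmcmds():
 *	release the vnode references taken by new_vmcmd() and throw
 *	away the command array, resetting the set to empty.
 */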
void
kill_vmcmds(evsp)
	struct	exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *vcp;
	int i;

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	FREE(evsp->evs_cmds, M_EXEC);
}
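
/*
 * Note (informational): evs_cmds is left dangling after the FREE, but
 * that is safe because evs_cnt is reset to 0, so the next new_vmcmd()
 * goes through vmcmdset_extend() and installs a fresh array before the
 * pointer is used again.
 */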

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	struct uvm_object *uobj;
	int retval;

	/*
	 * map the vnode in using uvm_map.
	 */

	/* checks imported from uvm_mmap, needed? */
	if (cmd->ev_len == 0)
		return (0);
	if (cmd->ev_offset & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return (EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach((void *)cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return (ENOMEM);

	/*
	 * do the map
	 */

	retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset,
		UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

	/*
	 * check for error
	 */

	if (retval == KERN_SUCCESS)
		return (0);

	/*
	 * error: detach from object
	 */

	uobj->pgops->pgo_detach(uobj);
	return (EINVAL);
}
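
/*
 * Note (informational): the PAGE_MASK checks above mean this handler
 * only works for segments whose file offset, address and length are
 * all page-aligned; formats whose segments don't satisfy that
 * typically fall back on vmcmd_map_readvn() below.
 */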

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return (KERN_SUCCESS); /* XXXCDC: should it happen? */

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(p, cmd);
}
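
/*
 * Worked example (informational): with 4KB pages, a request with
 * ev_addr 0x10123 and ev_offset 0x323 gives diff = 0x123, so the
 * region is mapped at 0x10000 and read from file offset 0x200; the
 * byte originally at offset 0x323 still lands at address 0x10123.
 */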

int
vmcmd_readvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return error;

	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.  however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		return (uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				cmd->ev_prot, FALSE));
	} else {
		return (KERN_SUCCESS);
	}
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 *	The address range must first be allocated, then protected
 *	appropriately.
 */

int
vmcmd_map_zero(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return (KERN_SUCCESS); /* XXXCDC: should it happen? */

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
			UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

	if (error)
		return error;
	return (KERN_SUCCESS);
}
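
/*
 * Putting it together (illustrative only): a ZMAGIC-style demand-paged
 * format might queue its three segments like this, where the "a_*"
 * sizes and the "*_addr"/"*_off" values are stand-ins for fields of
 * the format's header:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, a_text, text_addr,
 *	    epp->ep_vp, text_off, VM_PROT_READ|VM_PROT_EXECUTE);
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, a_data, data_addr,
 *	    epp->ep_vp, data_off, VM_PROT_ALL);
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, a_bss, bss_addr,
 *	    NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE);
 */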