/*	$NetBSD: exec_subr.c,v 1.22 2000/08/01 04:57:29 thorpej Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>

#include <uvm/uvm.h>

/*
 * XXX cgd 960926: this module should collect simple statistics
 * (calls, extends, kills).
 */
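
/*
 * A minimal sketch of what such statistics might look like (hypothetical,
 * not yet implemented): event counters bumped in new_vmcmd(),
 * vmcmdset_extend() and kill_vmcmds() respectively, e.g.
 *
 *	static u_long vmcmd_calls, vmcmd_extends, vmcmd_kills;
 */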

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct proc *p, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;
}
#endif /* DEBUG */
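
/*
 * When DEBUG is not defined, new_vmcmd() is a macro supplied by
 * <sys/exec.h> that open-codes the same work.  A rough sketch of the
 * expansion (see the header for the authoritative definition):
 *
 *	#define	new_vmcmd(evsp, proc, len, addr, vp, offset, prot, flags) \
 *		... grow the set if full, fill in the next exec_vmcmd,    \
 *		    and vref() the vnode if non-NULL ...
 */

/*
 * vmcmdset_extend():
 *	grow a vmcmd set so that it can hold more commands; called by
 *	new_vmcmd() when the current array is full.
 */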

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;
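	/*
	 * (i.e. start at EXEC_DEFAULT_VMCMD_SETSIZE entries, then double
	 * the array on each subsequent extension)
	 */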

	/* allocate it */
	MALLOC(nvcp, struct exec_vmcmd *,
	    (evsp->evs_cnt * sizeof(struct exec_vmcmd)), M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		FREE(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}
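
/*
 * kill_vmcmds():
 *	release the vnode references taken by new_vmcmd() and free the
 *	set's command array, leaving the set empty.
 */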

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	FREE(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	struct uvm_object *uobj;
	int retval;

	/*
	 * map the vnode in using uvm_map.
	 */

	/* checks imported from uvm_mmap(); are they still needed here? */
	if (cmd->ev_len == 0)
		return (0);
	if (cmd->ev_offset & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return (EINVAL);
	if (cmd->ev_len & PAGE_MASK)
		return (EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach((void *)cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return (ENOMEM);

	/*
	 * do the map
	 */

	retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    uobj, cmd->ev_offset,
	    UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

	/*
	 * check for error
	 */

	if (retval == KERN_SUCCESS)
		return (0);

	/*
	 * error: detach from the object
	 */

	uobj->pgops->pgo_detach(uobj);
	return (EINVAL);
}
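
/*
 * Example (hypothetical, modelled on the a.out-style exec packages): a
 * loader that wants its text segment demand-paged from the executable
 * would queue something like
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, epp->ep_tsize,
 *	    epp->ep_taddr, epp->ep_vp, text_offset,
 *	    VM_PROT_READ|VM_PROT_EXECUTE);
 *
 * after marking the vnode VTEXT; "text_offset" stands in for the
 * segment's file offset.
 */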

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return (KERN_SUCCESS); /* XXXCDC: should it happen? */

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return (error);

	return (vmcmd_readvn(p, cmd));
}
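
/*
 * Example (hypothetical): an NMAGIC-style loader would read its impure
 * data segment in rather than mapping it:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, epp->ep_dsize,
 *	    epp->ep_daddr, epp->ep_vp, data_offset,
 *	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 *
 * where "data_offset" stands in for the segment's file offset.
 */

/*
 * vmcmd_readvn():
 *	read the vmcmd's data from the vnode into the (already mapped)
 *	address range, then tighten the mapping's protection to what the
 *	vmcmd actually asked for.
 */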

int
vmcmd_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return (error);

	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.   however, the caller wants it mapped
		 * with less permission, so we now have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		return (uvm_map_protect(&p->p_vmspace->vm_map,
		    trunc_page(cmd->ev_addr),
		    round_page(cmd->ev_addr + cmd->ev_len),
		    cmd->ev_prot, FALSE));
	} else {
		return (KERN_SUCCESS);
	}
}
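
/*
 * XXX: note that the vmcmds above mix return conventions: the alignment
 * checks return errno values while uvm_map() and uvm_map_protect() return
 * KERN_* codes.  KERN_SUCCESS is 0, so callers that only test for nonzero
 * still detect success and failure consistently either way.
 */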

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must first be allocated, then protected appropriately.
 */

int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return (KERN_SUCCESS); /* XXXCDC: should it happen? */

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
	    UVM_ADV_NORMAL,
	    UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

	if (error)
		return (error);
	return (KERN_SUCCESS);
}
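
/*
 * Example (hypothetical): exec packages typically use this for bss, e.g.
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bss_len,
 *	    epp->ep_daddr + data_len, NULLVP, 0,
 *	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 *
 * where "bss_len" and "data_len" stand in for sizes computed from the
 * executable's header.
 */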