/*	$NetBSD: exec_subr.c,v 1.34 2003/02/20 22:16:07 atatat Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.34 2003/02/20 22:16:07 atatat Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>

#include <uvm/uvm.h>

/*
 * XXX cgd 960926: this module should collect simple statistics
 * (calls, extends, kills).
 */

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(struct exec_vmcmd_set *evsp,
    int (*proc)(struct proc * p, struct exec_vmcmd *),
    u_long len, u_long addr, struct vnode *vp, u_long offset,
    u_int prot, int flags)
{
	struct exec_vmcmd	*vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
	vcp->ev_flags = flags;

	/*
	 * if this is a top-down mapping placed relative to a base
	 * entry, walk back to the most recent VMCMD_BASE command and,
	 * if that base is itself top-down and not fixed, lower its
	 * address to make room for this mapping.
	 */
	if ((flags & (VMCMD_TOPDOWN|VMCMD_RELATIVE)) ==
	    (VMCMD_TOPDOWN|VMCMD_RELATIVE)) {
		int i = evsp->evs_used - 2;
		while (i >= 0) {
			vcp = &evsp->evs_cmds[i--];
			if (vcp->ev_flags & VMCMD_BASE) {
				if ((vcp->ev_flags &
				    (VMCMD_TOPDOWN|VMCMD_FIXED)) ==
				    (VMCMD_TOPDOWN))
					vcp->ev_addr -= round_page(len);
				break;
			}
		}
	}
}
#endif /* DEBUG */
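
/*
 * A sketch of typical use (values illustrative, not from this file):
 * callers queue vmcmds through the NEW_VMCMD/NEW_VMCMD2 macros in
 * <sys/exec.h>, which expand to new_vmcmd() only under DEBUG:
 *
 *	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_pagedvn, len, addr,
 *	    epp->ep_vp, offset, VM_PROT_READ|VM_PROT_EXECUTE,
 *	    VMCMD_RELATIVE);
 */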

/*
 * vmcmdset_extend():
 *	double the capacity of a vmcmd set (or give it the initial
 *	EXEC_DEFAULT_VMCMD_SETSIZE entries), copying across any
 *	commands already queued.
 */

void
vmcmdset_extend(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	nvcp = malloc(evsp->evs_cnt * sizeof(struct exec_vmcmd),
	    M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds,
		    (ocnt * sizeof(struct exec_vmcmd)));
		free(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}
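
/*
 * Note that because the set doubles on each extension, queueing n
 * vmcmds costs only O(log n) allocations, and the copying works out
 * to amortized constant time per command.
 */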

/*
 * kill_vmcmds():
 *	release a vmcmd set: drop the reference on each command's
 *	vnode, if any, and free the command array itself.
 */

void
kill_vmcmds(struct exec_vmcmd_set *evsp)
{
	struct exec_vmcmd *vcp;
	u_int i;

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	free(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
{
	struct uvm_object *uobj;
	int error;

	KASSERT(cmd->ev_vp->v_flag & VTEXT);

	/*
	 * map the vnode in using uvm_map.
	 */

	if (cmd->ev_len == 0)
		return 0;
	if (cmd->ev_offset & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_addr & PAGE_MASK)
		return EINVAL;
	if (cmd->ev_len & PAGE_MASK)
		return EINVAL;

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach(cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return ENOMEM;
	VREF(cmd->ev_vp);

	/*
	 * do the map
	 */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset, 0,
		UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
	if (error) {
		uobj->pgops->pgo_detach(uobj);
	}
	return error;
}
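
/*
 * For illustration (file offset hypothetical): an exec parser would
 * queue a demand-paged text segment for this handler along the lines
 * of
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn,
 *	    epp->ep_tsize, epp->ep_taddr, epp->ep_vp, text_offset,
 *	    VM_PROT_READ|VM_PROT_EXECUTE);
 *
 * with len, addr and offset all page-aligned, as enforced above.
 */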

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return 0;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	return vmcmd_readvn(p, cmd);
}
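
/*
 * For illustration (file offset hypothetical): an impure data segment
 * that need not be page-aligned in the file could be queued as
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
 *	    epp->ep_dsize, epp->ep_daddr, epp->ep_vp, data_offset,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 *
 * the region is mapped anonymous and writable here, then read into
 * and re-protected by vmcmd_readvn() below.
 */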

int
vmcmd_readvn(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return error;

#ifdef PMAP_NEED_PROCWR
	/*
	 * we had to write to the process' address space; make sure the
	 * pages are synched with the instruction cache.
	 */
	if (cmd->ev_prot & VM_PROT_EXECUTE)
		pmap_procwr(p, cmd->ev_addr, cmd->ev_len);
#endif

	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {

		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.   however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */

		return uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				cmd->ev_prot, FALSE);
	}
	return 0;
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.
 *	The address range must first be allocated, then protected
 *	appropriately.
 */

int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
	int error;
	long diff;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	return error;
}
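
/*
 * For illustration (sizes hypothetical): BSS is the classic user of
 * this handler; a zero-fill region needs no vnode at all:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
 *	    bss_size, bss_addr, NULLVP, 0,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 */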

/*
 * exec_read_from():
 *
 *	Read from vnode into buffer at offset.
 */
int
exec_read_from(struct proc *p, struct vnode *vp, u_long off, void *buf,
    size_t size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
	    0, p->p_ucred, &resid, p)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return ENOEXEC;
	return 0;
}
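
/*
 * For illustration (struct name hypothetical): an exec format probe
 * would typically pull in its candidate header with
 *
 *	struct someformat_header hdr;
 *	error = exec_read_from(p, epp->ep_vp, 0, &hdr, sizeof(hdr));
 *
 * and a short read comes back as ENOEXEC, so the probe simply fails.
 */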