/*	$NetBSD: exec_subr.c,v 1.17 1999/07/07 20:23:45 ws Exp $	*/

/*
 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>

#include <vm/vm.h>

#include <uvm/uvm.h>

/*
 * XXX cgd 960926: this module should collect simple statistics
 * (calls, extends, kills).
 */

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

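/*
 * Illustrative sketch only: an exec package typically queues its
 * segments through the NEW_VMCMD() macro from <sys/exec.h> (which
 * becomes a call to this function when DEBUG is defined), roughly:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, epp->ep_tsize,
 *	    epp->ep_taddr, epp->ep_vp, text_offset,
 *	    VM_PROT_READ|VM_PROT_EXECUTE);
 *
 * The names epp and text_offset above are placeholders, not part of
 * this file.
 */
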
void
new_vmcmd(evsp, proc, len, addr, vp, offset, prot)
	struct	exec_vmcmd_set *evsp;
	int	(*proc) __P((struct proc * p, struct exec_vmcmd *));
	u_long	len;
	u_long	addr;
	struct	vnode *vp;
	u_long	offset;
	u_int	prot;
{
	struct exec_vmcmd    *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
}
#endif /* DEBUG */

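/*
 * vmcmdset_extend():
 *	grow a vmcmd set that has run out of slots.  the array starts
 *	at EXEC_DEFAULT_VMCMD_SETSIZE entries and doubles on each
 *	subsequent extension; existing entries are copied over.
 */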
void
vmcmdset_extend(evsp)
	struct	exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	MALLOC(nvcp, struct exec_vmcmd *,
	    (evsp->evs_cnt * sizeof(struct exec_vmcmd)), M_EXEC, M_WAITOK);

	/* free the old struct, if there was one, and record the new one */
	if (ocnt) {
		memcpy(nvcp, evsp->evs_cmds, (ocnt * sizeof(struct exec_vmcmd)));
		FREE(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}

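/*
 * kill_vmcmds():
 *	release a vmcmd set without running it: drop the vnode
 *	references taken by new_vmcmd() and free the command array.
 */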
void
kill_vmcmds(evsp)
	struct	exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *vcp;
	int i;

	if (evsp->evs_cnt == 0)
		return;

	for (i = 0; i < evsp->evs_used; i++) {
		vcp = &evsp->evs_cmds[i];
		if (vcp->ev_vp != NULLVP)
			vrele(vcp->ev_vp);
	}
	evsp->evs_used = evsp->evs_cnt = 0;
	FREE(evsp->evs_cmds, M_EXEC);
}

/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	struct uvm_object *uobj;
	int retval;

	/*
	 * map the vnode in using uvm_map.
	 */

	/* checks imported from uvm_mmap, needed? */
	if (cmd->ev_len == 0)
		return(0);
	if (cmd->ev_offset & PAGE_MASK)
		return(EINVAL);
	if (cmd->ev_addr & PAGE_MASK)
		return(EINVAL);

	/*
	 * first, attach to the object
	 */

	uobj = uvn_attach((void *) cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
	if (uobj == NULL)
		return(ENOMEM);

	/*
	 * do the map
	 */

	retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
		uobj, cmd->ev_offset,
		UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));

	/*
	 * check for error
	 */

	if (retval == KERN_SUCCESS)
		return(0);

	/*
	 * error: detach from object
	 */

	uobj->pgops->pgo_detach(uobj);
	return (EINVAL);
}

/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return(KERN_SUCCESS); /* XXXCDC: should it happen? */

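	/*
	 * uvm_map() needs a page-aligned start address, so slide the
	 * start address and the file offset down to the page boundary
	 * and grow the length to compensate.
	 */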
	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_offset -= diff;
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
			UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));

	if (error)
		return error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT,
	    p->p_ucred, NULL, p);
	if (error)
		return error;

	if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
		/*
		 * we had to map in the area at PROT_ALL so that vn_rdwr()
		 * could write to it.   however, the caller seems to want
		 * it mapped read-only, so now we are going to have to call
		 * uvm_map_protect() to fix up the protection.  ICK.
		 */
		return(uvm_map_protect(&p->p_vmspace->vm_map,
				trunc_page(cmd->ev_addr),
				round_page(cmd->ev_addr + cmd->ev_len),
				cmd->ev_prot, FALSE));
	} else {
		return (KERN_SUCCESS);
	}
}

/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must first be allocated, then protected appropriately.
 */

int
vmcmd_map_zero(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;
	long diff;

	if (cmd->ev_len == 0)
		return(KERN_SUCCESS); /* XXXCDC: should it happen? */

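	/*
	 * align to a page boundary as in vmcmd_map_readvn() above;
	 * there is no backing object here, so there is no file offset
	 * to adjust.
	 */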
	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
			UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));

	if (error)
		return error;
	return (KERN_SUCCESS);
}