exec_subr.c revision 1.4

/*
 * Copyright (c) 1993, 1994 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$Id: exec_subr.c,v 1.4 1994/01/16 03:07:33 cgd Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/filedesc.h>
#include <sys/exec.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_user.h>

#ifdef DEBUG
/*
 * new_vmcmd():
 *	create a new vmcmd structure and fill in its fields based
 *	on function call arguments.  make sure objects ref'd by
 *	the vmcmd are 'held'.
 *
 * If not debugging, this is a macro, so it's expanded inline.
 */

void
new_vmcmd(evsp, proc, len, addr, vp, offset, prot)
	struct	exec_vmcmd_set *evsp;
	int	(*proc) __P((struct proc * p, struct exec_vmcmd *));
	u_long	len;
	u_long	addr;
	struct	vnode *vp;
	u_long	offset;
	u_int	prot;
{
	struct exec_vmcmd *vcp;

	if (evsp->evs_used >= evsp->evs_cnt)
		vmcmdset_extend(evsp);
	vcp = &evsp->evs_cmds[evsp->evs_used++];
	vcp->ev_proc = proc;
	vcp->ev_len = len;
	vcp->ev_addr = addr;
	if ((vcp->ev_vp = vp) != NULL)
		vref(vp);
	vcp->ev_offset = offset;
	vcp->ev_prot = prot;
}
#endif /* DEBUG */

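/*
 * Example (illustrative sketch only, not part of this file): an
 * executable-format loader queues vmcmds with the NEW_VMCMD() macro
 * from <sys/exec.h>, which expands to a call to new_vmcmd() when
 * DEBUG is defined.  Assuming 'epp' is the struct exec_package being
 * filled in, a demand-paged text segment might be queued as:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, epp->ep_tsize,
 *	    epp->ep_taddr, epp->ep_vp, 0, VM_PROT_READ|VM_PROT_EXECUTE);
 */
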
/*
 * vmcmdset_extend():
 *	grow a vmcmd set so another entry can be added; the set doubles
 *	in size once it is non-empty.
 */

void
vmcmdset_extend(evsp)
	struct	exec_vmcmd_set *evsp;
{
	struct exec_vmcmd *nvcp;
	u_int ocnt;

#ifdef DIAGNOSTIC
	if (evsp->evs_used < evsp->evs_cnt)
		panic("vmcmdset_extend: not necessary");
#endif

	/* figure out number of entries in new set */
	ocnt = evsp->evs_cnt;
	evsp->evs_cnt += ocnt ? ocnt : EXEC_DEFAULT_VMCMD_SETSIZE;

	/* allocate it */
	MALLOC(nvcp, struct exec_vmcmd *,
	    (evsp->evs_cnt * sizeof(struct exec_vmcmd)), M_EXEC, M_WAITOK);

	/* copy over the old entries, free the old array if there was one,
	   and record the new one */
	if (ocnt) {
		bcopy(evsp->evs_cmds, nvcp, (ocnt * sizeof(struct exec_vmcmd)));
		FREE(evsp->evs_cmds, M_EXEC);
	}
	evsp->evs_cmds = nvcp;
}
    107  1.1  cgd 
    108  1.1  cgd void
    109  1.1  cgd kill_vmcmds(evsp)
    110  1.1  cgd 	struct	exec_vmcmd_set *evsp;
    111  1.1  cgd {
    112  1.1  cgd 	struct exec_vmcmd *vcp;
    113  1.1  cgd 	int i;
    114  1.1  cgd 
    115  1.1  cgd 	if (evsp->evs_cnt == 0)
    116  1.1  cgd 		return;
    117  1.1  cgd 
    118  1.1  cgd 	for (i = 0; i < evsp->evs_used; i++) {
    119  1.1  cgd 		vcp = &evsp->evs_cmds[i];
    120  1.1  cgd 		if (vcp->ev_vp != NULLVP)
    121  1.1  cgd 			vrele(vcp->ev_vp);
    122  1.1  cgd 	}
    123  1.1  cgd 	evsp->evs_used = evsp->evs_cnt = 0;
    124  1.1  cgd 	FREE(evsp->evs_cmds, M_EXEC);
    125  1.1  cgd }
    126  1.1  cgd 
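/*
 * Example (illustrative sketch only): once a loader has queued its
 * vmcmds, the exec code runs them in order and then releases the set;
 * error handling is elided here:
 *
 *	for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) {
 *		vcp = &epp->ep_vmcmds.evs_cmds[i];
 *		error = (*vcp->ev_proc)(p, vcp);
 *	}
 *	kill_vmcmds(&epp->ep_vmcmds);
 */
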
/*
 * vmcmd_map_pagedvn():
 *	handle vmcmd which specifies that a vnode should be mmap'd.
 *	appropriate for handling demand-paged text and data segments.
 */

int
vmcmd_map_pagedvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	/*
	 * note that if you're going to map part of a process as being
	 * paged from a vnode, that vnode had damn well better be marked as
	 * VTEXT.  that's handled in the routine which sets up the vmcmd to
	 * call this routine.
	 */
	return vm_mmap(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
	    cmd->ev_prot, VM_PROT_ALL, MAP_FIXED|MAP_FILE|MAP_COPY,
	    cmd->ev_vp, cmd->ev_offset);
}

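/*
 * Example (illustrative sketch only): the loader that queues a
 * vmcmd_map_pagedvn is expected to have marked the executable's vnode
 * as text beforehand, e.g.:
 *
 *	epp->ep_vp->v_flag |= VTEXT;
 */
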
/*
 * vmcmd_map_readvn():
 *	handle vmcmd which specifies that a vnode should be read from.
 *	appropriate for non-demand-paged text/data segments, i.e. impure
 *	objects (a la OMAGIC and NMAGIC).
 */
int
vmcmd_map_readvn(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

	error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    cmd->ev_len, 0);
	if (error)
		return error;

	error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
	    cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT|IO_NODELOCKED,
	    p->p_ucred, (int *)0, p);
	if (error)
		return error;

	return vm_protect(&p->p_vmspace->vm_map, cmd->ev_addr, cmd->ev_len,
	    FALSE, cmd->ev_prot);
}

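/*
 * Example (illustrative sketch only): an OMAGIC-style loader might
 * read text and data into a writable region with a single readvn
 * vmcmd; the sizes, offset, and protections here are hypothetical:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
 *	    epp->ep_tsize + epp->ep_dsize, epp->ep_taddr, epp->ep_vp,
 *	    0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
 */
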
/*
 * vmcmd_map_zero():
 *	handle vmcmd which specifies a zero-filled address space region.  The
 *	address range must be first allocated, then protected appropriately.
 */

int
vmcmd_map_zero(p, cmd)
	struct proc *p;
	struct exec_vmcmd *cmd;
{
	int error;

	error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
	    cmd->ev_len, 0);
	if (error)
		return error;

	return vm_protect(&p->p_vmspace->vm_map, cmd->ev_addr, cmd->ev_len,
	    FALSE, cmd->ev_prot);
}

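/*
 * Example (illustrative sketch only): bss is the usual client of
 * vmcmd_map_zero.  No vnode is involved, so NULLVP and a zero offset
 * are passed; 'baddr' and 'bsize' stand for values the loader has
 * computed:
 *
 *	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsize, baddr,
 *	    NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE);
 */
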
/*
 * exec_closefd():
 *	close the given file descriptor on behalf of exec, keeping the
 *	descriptor table's fd_lastfile and fd_freefile hints consistent.
 */

void
exec_closefd(p, fd)
	struct proc *p;
	int fd;
{
	if (p->p_fd->fd_lastfile == fd)
		p->p_fd->fd_lastfile--;
	if (p->p_fd->fd_freefile > fd)
		p->p_fd->fd_freefile = fd;
	closef(p->p_fd->fd_ofiles[fd], p);
	p->p_fd->fd_ofiles[fd] = 0;
}
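
/*
 * Example (illustrative sketch only): exec code uses exec_closefd()
 * to drop a descriptor it must not leak into the new image, assuming
 * 'fd' names an open descriptor in p's file table:
 *
 *	if (p->p_fd->fd_ofiles[fd] != NULL)
 *		exec_closefd(p, fd);
 */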