/*	$NetBSD: uvm_map.h,v 1.62 2008/07/29 00:03:06 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 *      the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }
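
/*
 * Illustrative sketch (not part of the original header): a typical
 * caller that operates on the address range [start, end) first looks
 * up the entry containing "start" and clips at both ends so the range
 * is covered exactly.  "map", "entry", "start", "end" and "umr" are
 * assumed caller-provided; error handling and iteration over multiple
 * entries are omitted.
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start, umr);
 *		UVM_MAP_CLIP_END(map, entry, end, umr);
 *		... operate on the clipped entry ...
 *	}
 *	vm_map_unlock(map);
 */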

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
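
/*
 * Illustrative sketch (not part of the original header): copying an
 * address range out of a source map into the kernel map with
 * uvm_map_extract() (prototype below).  "srcmap", "srcaddr" and "len"
 * are assumed caller-provided, and this particular flag combination
 * is only an example.
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcaddr, len, kernel_map, &dstaddr,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return error;
 */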

#endif /* _KERNEL */

#include <sys/rb.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02		/* contains map entries */
#define	UVM_MAP_FIRST		0x04		/* the first special entry */
#define	UVM_MAP_QUANTUM		0x08		/* allocated with
						 * UVM_FLAG_QUANTUM */
#define	UVM_MAP_NOMERGE		0x10		/* this entry is not mergeable */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  However, it is sometimes required
 *	to downgrade an exclusive lock to a shared lock, and upgrade to
 *	an exclusive lock again (to perform error recovery).  While this
 *	is in progress, another thread *must not* queue itself to receive
 *	an exclusive lock before we upgrade back to exclusive; otherwise
 *	the error recovery becomes extremely difficult, if not impossible.
 *
 *	In order to prevent this scenario, we introduce the notion of
 *	a `busy' map.  A `busy' map is read-locked, but other threads
 *	attempting to write-lock wait for this flag to clear before
 *	entering the lock manager.  A map may only be marked busy
 *	when the map is write-locked (and then the map must be downgraded
 *	to read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).
 *
 *	Access to the map `flags' member is controlled by the `flags_lock'
 *	simple lock.  Note that some flags are static (set once at map
 *	creation time, and never changed), and thus require no locking
 *	to check.  All flags which are r/w must be set or cleared while
 *	the `flags_lock' is asserted.  Additional locking requirements
 *	are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  May be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_DYING		r/o; set when a vmspace is being
 *					destroyed to indicate that updates
 *					to the pmap can be skipped.
 *
 *		VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *					created if unspecified map
 *					allocations are to be arranged in
 *					a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		mutex;		/* INTRSAFE lock */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
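
/*
 * Illustrative sketch (not part of the original header) of one common
 * use of the busy mechanism described above, using the locking
 * functions prototyped below: mark the map busy, drop the lock across
 * a potentially blocking operation, then reacquire the lock and clear
 * the busy state.  The exact requirements are in uvm_map.c; this is
 * only a hedged summary.
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);
 *	vm_map_unlock(map);
 *
 *	(... perform an operation that may sleep ...)
 *
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);
 *	vm_map_unlock(map);
 */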

#if defined(_KERNEL)

#include <sys/callback.h>

struct vm_map_kernel {
	struct vm_map vmk_map;
	LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free;
			/* Freelist of map entries */
	struct vm_map_entry	*vmk_merged_entries;
			/* Merged entries, kept for later splitting */

	struct callback_head vmk_reclaim_callback;
#if !defined(PMAP_MAP_POOLPAGE)
	struct pool vmk_vacache; /* kva cache */
	struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
#endif
};
#endif /* defined(_KERNEL) */

#define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_VACACHE		0x80		/* ro: use kva cache */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */

#ifdef _KERNEL
struct uvm_mapent_reservation {
	struct vm_map_entry *umr_entries[2];
	int umr_nentries;
};
#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
#define	UMR_PUTENTRY(umr, ent)	\
	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
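
/*
 * Illustrative sketch (not part of the original header): callers that
 * may need to clip entries while the map is locked can reserve map
 * entries up front with uvm_mapent_reserve() (prototype below) so the
 * clip cannot block on entry allocation, and release the reservation
 * afterwards.  Error handling is omitted, and the flags argument (0)
 * is only an example.
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	uvm_mapent_reserve(map, &umr, 2, 0);
 *	vm_map_lock(map);
 *	(... UVM_MAP_CLIP_START / UVM_MAP_CLIP_END with &umr ...)
 *	vm_map_unlock(map);
 *	uvm_mapent_unreserve(map, &umr);
 */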

struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int, struct vm_map_entry **);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_setup_kernel(struct vm_map_kernel *,
		    vaddr_t, vaddr_t, int);
struct vm_map_kernel *
		vm_map_to_kernel(struct vm_map *);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, struct uvm_mapent_reservation *,
		    int);

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);
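
/*
 * A rough summary (see uvm_map.c for the authoritative behavior):
 * uvm_map_prepare() validates a mapping request and fills in a
 * struct uvm_map_args, and uvm_map_enter() then installs the entry
 * described by those args; together they make up the usual
 * uvm_map() path.
 */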

int		uvm_mapent_reserve(struct vm_map *,
		    struct uvm_mapent_reservation *, int, int);
void		uvm_mapent_unreserve(struct vm_map *,
		    struct uvm_mapent_reservation *);

vsize_t		uvm_mapent_overhead(vsize_t, int);

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool		vm_map_starved_p(struct vm_map *);

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);

#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
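/*
 * (Added note.)  The "header" member of struct vm_map is a sentinel
 * entry: its end field holds the map's minimum address and its start
 * field holds the maximum, which is why the accessors below look
 * inverted.
 */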
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */