/*	$NetBSD: uvm_map.h,v 1.53 2006/05/03 14:12:01 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }
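
/*
 * Example (sketch): before operating on part of a mapping, callers
 * typically clip the overlapping entry so that it covers exactly the
 * range of interest.  This assumes the map is write-locked and that
 * "umr" holds enough reserved entries for up to two splits; the helper
 * name is hypothetical.
 */
#if 0	/* illustration only */
static void
example_clip(struct vm_map *map, struct vm_map_entry *entry,
    vaddr_t start, vaddr_t end, struct uvm_mapent_reservation *umr)
{

	UVM_MAP_CLIP_START(map, entry, start, umr);	/* may split off head */
	UVM_MAP_CLIP_END(map, entry, end, umr);		/* may split off tail */
	/* "entry" now spans exactly [start, end) */
}
#endif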

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
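
/*
 * Example (sketch): uvm_map_extract() (prototyped below) takes a
 * combination of these flags.  Here UVM_EXTRACT_REMOVE turns the copy
 * into a move, and UVM_EXTRACT_CONTIG asks for one contiguous block in
 * the destination map.  "srcmap", "dstmap", "start" and "len" are
 * hypothetical.
 */
#if 0	/* illustration only */
	vaddr_t dstaddr;
	int error;

	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
	if (error)
		return (error);
#endif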

#endif /* _KERNEL */

#include <sys/tree.h>
#include <sys/pool.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02		/* contains map entries */
#define	UVM_MAP_FIRST		0x04		/* the first special entry */
#define	UVM_MAP_QUANTUM		0x08		/* allocated with
						 * UVM_FLAG_QUANTUM */
#define	UVM_MAP_NOMERGE		0x10		/* this entry is not mergeable */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
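
/*
 * Example (sketch): entries form a doubly-linked list rooted at the
 * map's header sentinel, kept sorted by address.  This walk counts the
 * wired entries; it assumes the map is at least read-locked, and the
 * helper name is hypothetical.
 */
#if 0	/* illustration only */
static int
example_count_wired(struct vm_map *map)
{
	struct vm_map_entry *entry;
	int n = 0;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		if (VM_MAPENT_ISWIRED(entry))
			n++;
	}
	return (n);
}
#endif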

/*
 *	Maps are doubly-linked lists of map entries, kept sorted
 *	by address.  A single hint is provided to start
 *	searches again from the last successful search,
 *	insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  However, it is sometimes required
 *	to downgrade an exclusive lock to a shared lock, and upgrade to
 *	an exclusive lock again (to perform error recovery).  In that case,
 *	another thread *must not* queue itself to receive an exclusive
 *	lock before we upgrade back to exclusive, otherwise the
 *	error recovery becomes extremely difficult, if not impossible.
 *
 *	In order to prevent this scenario, we introduce the notion of
 *	a `busy' map.  A `busy' map is read-locked, but other threads
 *	attempting to write-lock wait for this flag to clear before
 *	entering the lock manager.  A map may only be marked busy
 *	when the map is write-locked (and then the map must be downgraded
 *	to read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).
 *
 *	Access to the map `flags' member is controlled by the `flags_lock'
 *	simple lock.  Note that some flags are static (set once at map
 *	creation time, and never changed), and thus require no locking
 *	to check.  All flags which are r/w must be set or
 *	cleared while the `flags_lock' is asserted.  Additional locking
 *	requirements are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  may be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_BUSY		r/w; may only be set when map is
 *					write-locked, may only be cleared by
 *					thread which set it, map read-locked
 *					or write-locked.  must be tested
 *					while `flags_lock' is asserted.
 *
 *		VM_MAP_WANTLOCK		r/w; may only be set when the map
 *					is busy, and thread is attempting
 *					to write-lock.  must be tested
 *					while `flags_lock' is asserted.
 *
 *		VM_MAP_DYING		r/o; set when a vmspace is being
 *					destroyed to indicate that updates
 *					to the pmap can be skipped.
 *
 *		VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *					created if the unspecified map
 *					allocations are to be arranged in
 *					a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
};

#if defined(_KERNEL)
struct vm_map_kernel {
	struct vm_map vmk_map;
	LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free;
			/* Freelist of map entries */
	struct vm_map_entry	*vmk_merged_entries;
			/* Merged entries, kept for later splitting */

#if !defined(PMAP_MAP_POOLPAGE)
	struct pool vmk_vacache; /* kva cache */
	struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
#endif
};
#endif /* defined(_KERNEL) */

#define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_VACACHE		0x80		/* ro: use kva cache */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */
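
/*
 * Example (sketch): per the locking notes above, "ro" (static) flags
 * may be tested without any lock, while "rw" flags must be tested with
 * `flags_lock' held.
 */
#if 0	/* illustration only */
	boolean_t busy;

	if (map->flags & VM_MAP_TOPDOWN) {	/* static flag: no lock */
		/* ... arrange allocations top-down ... */
	}

	simple_lock(&map->flags_lock);		/* r/w flag: lock required */
	busy = (map->flags & VM_MAP_BUSY) != 0;
	simple_unlock(&map->flags_lock);
#endif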

#ifdef _KERNEL
struct uvm_mapent_reservation {
	struct vm_map_entry *umr_entries[2];
	int umr_nentries;
};
#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
#define	UMR_PUTENTRY(umr, ent)	\
	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
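
/*
 * Example (sketch): a typical reservation cycle.  Map entries are
 * reserved up front so that a later clip cannot fail for lack of memory
 * at a point where backing out would be hard.  Two entries cover the
 * worst case of one split at each end of a range; error handling and
 * flags are abbreviated.
 */
#if 0	/* illustration only */
	struct uvm_mapent_reservation umr;
	int error;

	error = uvm_mapent_reserve(map, &umr, 2, 0);
	if (error)
		return (error);
	vm_map_lock(map);
	/* ... look up the entry, then UVM_MAP_CLIP_START/END(..., &umr) ... */
	vm_map_unlock(map);
	uvm_mapent_unreserve(map, &umr);	/* give back unused entries */
#endif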

struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)
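
/*
 * Example (sketch): setting and clearing r/w map flags under
 * `flags_lock'.  Remember that VM_MAP_WIREFUTURE may only be changed
 * while the map is write-locked (see the locking notes above).
 */
#if 0	/* illustration only */
	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	/* set */
	vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);	/* clear */
#endif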
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_setup_kernel(struct vm_map_kernel *,
		    vaddr_t, vaddr_t, int);
struct vm_map_kernel *
		vm_map_to_kernel(struct vm_map *);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, struct uvm_mapent_reservation *,
		    int);
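
/*
 * Example (sketch): the usual two-step unmap, roughly what uvm_unmap1()
 * does.  Entries are unlinked from the map while it is locked; their
 * object and amap references are only dropped once the lock has been
 * released.
 */
#if 0	/* illustration only */
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
#endif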

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);
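
/*
 * Example (sketch): the two-phase mapping interface.  uvm_map_prepare()
 * validates the request and fills in "args"; uvm_map_enter() then
 * installs the mapping, optionally taking a preallocated map entry
 * (NULL here).  "start", "size", "uobj", "uoffset", "align" and "flags"
 * are hypothetical.
 */
#if 0	/* illustration only */
	struct uvm_map_args args;
	int error;

	error = uvm_map_prepare(map, start, size, uobj, uoffset, align,
	    flags, &args);
	if (error == 0)
		error = uvm_map_enter(map, &args, NULL);
#endif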

int		uvm_mapent_reserve(struct vm_map *,
		    struct uvm_mapent_reservation *, int, int);
void		uvm_mapent_unreserve(struct vm_map *,
		    struct uvm_mapent_reservation *);

vsize_t		uvm_mapent_overhead(vsize_t, int);

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive, spin locks.  We simply
 * use the sleep lock's interlock for this.
 */

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(struct vm_map *);
static __inline void		vm_map_lock(struct vm_map *);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			/* busy maps must not be write-locked */
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	if (rv)
		map->timestamp++;	/* write lock gained; bump version */

	return (rv);
}

static __inline void
vm_map_lock(struct vm_map *map)
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		simple_lock(&map->lock.lk_interlock);
		return;
	}

 try_again:
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		/* wait for the busy holder; vm_map_unbusy() wakes us */
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		/* LK_SLEEPFAIL means we slept; recheck the busy flag */
		KASSERT(error == ENOLCK);
		goto try_again;
	}

	map->timestamp++;
}

#ifdef DIAGNOSTIC
#define	vm_map_lock_read(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define	vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define	vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define	vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define	vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (/*CONSTCOND*/ 0)
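
/*
 * Example (sketch): the busy-map protocol from the locking notes above.
 * A thread that must drop to a read lock mid-operation, but later
 * regain the write lock for error recovery, marks the map busy first so
 * that no other thread can queue for the write lock in the meantime.
 */
#if 0	/* illustration only */
	vm_map_lock(map);	/* exclusive (write) lock */
	vm_map_busy(map);	/* may only be set while write-locked */
	vm_map_downgrade(map);	/* shared; would-be writers now wait */

	/* ... long-running work under the read lock ... */

	vm_map_upgrade(map);	/* safe: nobody could queue for the lock */
	vm_map_unbusy(map);	/* wakes threads blocked in vm_map_lock() */
	vm_map_unlock(map);
#endif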

#endif /* _KERNEL */

/*
 *	Functions implemented as macros.  Note that the map's header is
 *	a sentinel entry: its end field holds the map's minimum address
 *	and its start field holds the maximum.
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */