/*	$NetBSD: uvm_map.h,v 1.66 2011/02/02 15:25:27 chuck Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }

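/*
 * Example (an illustrative sketch, not part of this header): a typical
 * caller clips an entry to the [start, end) range of interest before
 * operating on it.  The `umr' reservation is assumed to have been
 * filled beforehand with uvm_mapent_reserve() (see the sketch near
 * struct uvm_mapent_reservation below); `entry', `start' and `end'
 * are hypothetical caller variables.
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start, &umr);
 *		UVM_MAP_CLIP_END(map, entry, end, &umr);
 *		... operate on the now-exact entry ...
 *	}
 *	vm_map_unlock(map);
 */
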
/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */

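/*
 * Example (an illustrative sketch only): extracting a range from a
 * source map into the kernel map with quick references and protections
 * raised to maxprot.  `srcmap', `srcaddr' and `len' are hypothetical
 * caller variables; kernel_map is the usual UVM global.
 *
 *	vaddr_t dstaddr;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcaddr, len, kernel_map,
 *	    &dstaddr, UVM_EXTRACT_QREF | UVM_EXTRACT_FIXPROT);
 */
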
#endif /* _KERNEL */

#include <sys/rbtree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02		/* contains map entries */
#define	UVM_MAP_FIRST		0x04		/* the first special entry */
#define	UVM_MAP_QUANTUM		0x08		/* allocated with
						 * UVM_FLAG_QUANTUM */
#define	UVM_MAP_NOMERGE		0x10		/* this entry is not mergeable */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

/*
 *	Maps are doubly-linked lists of map entries, kept sorted by
 *	address.  A single hint is provided to start searches again
 *	from the last successful search, insertion, or removal.
 *
 *	LOCKING PROTOCOL NOTES:
 *	-----------------------
 *
 *	VM map locking is a little complicated.  There are both shared
 *	and exclusive locks on maps.  It is sometimes necessary to
 *	downgrade an exclusive lock to a shared lock and later upgrade
 *	back to an exclusive lock (to perform error recovery).  While
 *	downgraded, another thread *must not* queue itself to receive
 *	an exclusive lock, otherwise error recovery becomes extremely
 *	difficult, if not impossible.
 *
 *	To prevent this scenario, we introduce the notion of a `busy'
 *	map.  A `busy' map is read-locked, but other threads attempting
 *	to write-lock wait for this flag to clear before entering the
 *	lock manager.  A map may only be marked busy when the map is
 *	write-locked (and then the map must be downgraded to
 *	read-locked), and may only be marked unbusy by the thread
 *	which marked it busy (holding *either* a read-lock or a
 *	write-lock, the latter being gained by an upgrade).
 *
 *	Access to the map `flags' member is controlled by the
 *	`flags_lock' simple lock.  Note that some flags are static (set
 *	once at map creation time, and never changed), and thus require
 *	no locking to check.  All flags which are r/w must be set or
 *	cleared while the `flags_lock' is asserted.  Additional locking
 *	requirements are:
 *
 *		VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *		VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *		VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *					map is write-locked.  may be tested
 *					without asserting `flags_lock'.
 *
 *		VM_MAP_DYING		r/o; set when a vmspace is being
 *					destroyed to indicate that updates
 *					to the pmap can be skipped.
 *
 *		VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *					created if map allocations with
 *					unspecified addresses are to be
 *					arranged in a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		mutex;		/* INTRSAFE lock */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
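
/*
 * Example (an illustrative sketch, not part of this header): entries
 * hang off a circular doubly-linked list rooted at `header', so a
 * full traversal, with at least a read lock held, looks like:
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		... examine entry ...
 *	}
 *	vm_map_unlock_read(map);
 */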

#if defined(_KERNEL)

#include <sys/callback.h>

struct vm_map_kernel {
	struct vm_map vmk_map;
	LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free;
			/* Freelist of map entries */
	struct vm_map_entry	*vmk_merged_entries;
			/* Merged entries, kept for later splitting */

	struct callback_head vmk_reclaim_callback;
#if !defined(PMAP_MAP_POOLPAGE)
	struct pool vmk_vacache; /* kva cache */
	struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
#endif
};
#endif /* defined(_KERNEL) */

#define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_VACACHE		0x80		/* ro: use kva cache */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */

#ifdef _KERNEL
struct uvm_mapent_reservation {
	struct vm_map_entry *umr_entries[2];
	int umr_nentries;
};
#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
#define	UMR_PUTENTRY(umr, ent)	\
	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
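
/*
 * Example (an illustrative sketch only): the reservation life cycle.
 * A caller prefills up to two entries so that later clipping cannot
 * fail for lack of memory, then returns whatever is left over.
 *
 *	struct uvm_mapent_reservation umr;
 *	int error;
 *
 *	error = uvm_mapent_reserve(map, &umr, 2, 0);
 *	if (error == 0) {
 *		... clipping and other operations that may consume
 *		    entries via UMR_GETENTRY() ...
 *		uvm_mapent_unreserve(map, &umr);
 *	}
 */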

struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_setup_kernel(struct vm_map_kernel *,
		    vaddr_t, vaddr_t, int);
struct vm_map_kernel *
		vm_map_to_kernel(struct vm_map *);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, struct uvm_mapent_reservation *,
		    int);

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);

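/*
 * Example (an illustrative sketch only): the two-phase mapping path.
 * uvm_map_prepare() validates the request and fills a struct
 * uvm_map_args; uvm_map_enter() then commits it.  `start', `size',
 * `uobj', `uoffset', `align', `flags' and `new_entry' are hypothetical
 * caller variables (new_entry may be a preallocated map entry).
 *
 *	struct uvm_map_args args;
 *	int error;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset,
 *	    align, flags, &args);
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, new_entry);
 */
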
int		uvm_mapent_reserve(struct vm_map *,
		    struct uvm_mapent_reservation *, int, int);
void		uvm_mapent_unreserve(struct vm_map *,
		    struct uvm_mapent_reservation *);

vsize_t		uvm_mapent_overhead(vsize_t, int);

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool		vm_map_starved_p(struct vm_map *);

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);
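
/*
 * Example (an illustrative sketch only; the authoritative lock-state
 * assertions live in uvm_map.c): the `busy' dance described in the
 * LOCKING PROTOCOL NOTES above.  The map is marked busy while
 * write-locked, the lock is then released for the long-running work,
 * and new writers wait until the busy marker is cleared.
 *
 *	vm_map_lock(map);	   write-lock the map
 *	vm_map_busy(map);	   mark it busy; new writers now wait
 *	vm_map_unlock(map);	   drop the lock while we work
 *	... long-running operation that must not race with writers ...
 *	vm_map_unbusy(map);	   clear busy and wake waiting writers
 */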

#endif /* _KERNEL */

/*
 *	Functions implemented as macros
 */
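/*
 * Note that the `header' entry is a sentinel: by convention its `end'
 * field holds the map's lowest address and its `start' field its
 * highest, which is why the min/max macros below look inverted.
 */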
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */