/*	$NetBSD: uvm_map.h,v 1.70 2012/01/27 19:48:41 para Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_start(MAP,ENTRY,VA); \
	} \
}

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_end(MAP,ENTRY,VA); \
	} \
}
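
/*
 * Illustrative sketch (not part of the kernel sources): a caller that
 * wants to operate on the exact range [start, end) first clips the
 * covering entry on both sides, with the map locked:
 *
 *	UVM_MAP_CLIP_START(map, entry, start);
 *	UVM_MAP_CLIP_END(map, entry, end);
 *	(entry now covers exactly the addresses being changed)
 */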

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
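
/*
 * These flags are OR'd together and passed as the final argument of
 * uvm_map_extract() (prototyped below).  A hedged sketch of one
 * plausible combination, moving a range from srcmap into dstmap
 * (variable names are assumptions, not from this header):
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */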

#endif /* _KERNEL */

#include <sys/rbtree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
	uint32_t		map_attrib;	/* uvm-external map attributes */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02		/* contains map entries */
#define	UVM_MAP_STATIC		0x04		/* special static entries */
#define	UVM_MAP_NOMERGE		0x08		/* this entry is not mergeable */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
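
/*
 * Entries are chained through prev/next in address order, with the
 * map's header entry acting as the list sentinel.  An illustrative
 * sketch (not from the kernel sources) that counts wired entries:
 *
 *	struct vm_map_entry *e;
 *	int nwired = 0;
 *
 *	for (e = map->header.next; e != &map->header; e = e->next) {
 *		if (VM_MAPENT_ISWIRED(e))
 *			nwired++;
 *	}
 */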

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 * VM_MAP_PAGEABLE	r/o static flag; no locking required
 *
 * VM_MAP_INTRSAFE	r/o static flag; no locking required
 *
 * VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *			map is write-locked.  may be tested
 *			without asserting `flags_lock'.
 *
 * VM_MAP_DYING	r/o; set when a vmspace is being
 *			destroyed to indicate that updates
 *			to the pmap can be skipped.
 *
 * VM_MAP_TOPDOWN	r/o; set when the vmspace is
 *			created if the unspecified map
 *			allocations are to be arranged in
 *			a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		mutex;		/* INTRSAFE lock */
	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};
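
/*
 * A hedged sketch of one plausible busy-map sequence, built from the
 * locking operations prototyped later in this header (it assumes the
 * caller re-acquires the lock to clear the busy state; the exact
 * discipline is defined by the implementation in uvm_map.c):
 *
 *	vm_map_lock(map);	(write-lock the map)
 *	vm_map_busy(map);	(other write-lockers now wait on `busy')
 *	vm_map_unlock(map);
 *	(... work that may sleep, e.g. I/O ...)
 *	vm_map_lock(map);
 *	vm_map_unbusy(map);	(must be done by the LWP that set it)
 *	vm_map_unlock(map);
 */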

#if defined(_KERNEL)

#include <sys/callback.h>

#endif /* defined(_KERNEL) */

#define VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
#define	VM_MAP_WANTVA		0x100		/* rw: want va */
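
/*
 * Per the locking notes above, the static (ro) flags may be tested
 * with no lock held.  Illustrative only:
 *
 *	if (map->flags & VM_MAP_TOPDOWN)
 *		(... allocate addresses from the top of the map down ...)
 */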

#ifdef _KERNEL
struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
void		uvm_map_init_caches(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, int);
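
/*
 * A hedged sketch of the usual removal sequence: uvm_unmap_remove()
 * unlinks the entries while the map is write-locked, and
 * uvm_unmap_detach() drops the object/amap references after the lock
 * has been released:
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */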

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);
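
/*
 * uvm_map_prepare() and uvm_map_enter() split a mapping operation into
 * a preparation phase and a commit phase, carrying state between the
 * two in struct uvm_map_args.  An illustrative sketch (variable names
 * are assumptions, not from this header):
 *
 *	struct uvm_map_args args;
 *	int error;
 *
 *	error = uvm_map_prepare(map, start, size, uobj, uoffset, 0,
 *	    flags, &args);
 *	if (error == 0)
 *		error = uvm_map_enter(map, &args, new_entry);
 */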

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define	UVM_MERGE_COPYING	1

bool		vm_map_starved_p(struct vm_map *);

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);

void		uvm_map_lock_entry(struct vm_map_entry *);
void		uvm_map_unlock_entry(struct vm_map_entry *);

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))
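
/*
 * Note the inversion in the four macros above: the header entry is a
 * sentinel covering no address range of its own, so its `end' field is
 * reused to store the map's minimum address and its `start' field the
 * maximum.
 */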

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */