/*	$NetBSD: uvm_map.h,v 1.40 2005/01/01 21:00:06 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); }
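
/*
 * Example (a sketch, not code from this file): a caller that wants to
 * operate on exactly the range [start, end) typically clips the entry on
 * both sides first.  "map", "entry", "start", "end" and "umr" here are
 * hypothetical locals; the map must be write-locked by the caller.
 *
 *	UVM_MAP_CLIP_START(map, entry, start, &umr);
 *	UVM_MAP_CLIP_END(map, entry, end, &umr);
 *	... entry now covers only [start, end), assuming it spanned both ...
 */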

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

#endif /* _KERNEL */

#include <sys/tree.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define	UVM_MAP_KERNEL		0x01	/* kernel map entry */
#define	UVM_MAP_KMAPENT		0x02	/* contains map entries */
#define	UVM_MAP_FIRST		0x04	/* the first special entry */
#define	UVM_MAP_QUANTUM		0x08	/* allocated with
					 * UVM_FLAG_QUANTUM */
#define	UVM_MAP_NOMERGE		0x10	/* this entry is not mergeable */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
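
/*
 * Example (a sketch, not part of the real API): entries hang off a map in
 * a sorted, doubly-linked list threaded through prev/next, with the map's
 * "header" entry (see struct vm_map below) acting as the list sentinel.
 * A hypothetical walk that counts wired entries, with the map at least
 * read-locked, might look like:
 *
 *	struct vm_map_entry *ent;
 *	int nwired = 0;
 *
 *	for (ent = map->header.next; ent != &map->header; ent = ent->next)
 *		if (VM_MAPENT_ISWIRED(ent))
 *			nwired++;
 */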

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive, otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
	LIST_HEAD(, uvm_kmapent_hdr) kentry_free; /* Freelist of map entries */

	struct vm_map_entry	*merged_entries; /* Merged entries, kept for
						  * later splitting */

#define	min_offset		header.end
#define	max_offset		header.start
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */

#ifdef _KERNEL
struct uvm_mapent_reservation {
	struct vm_map_entry *umr_entries[2];
	int umr_nentries;
};
#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
#define	UMR_PUTENTRY(umr, ent)	\
	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
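
/*
 * Example (a sketch; "map" and the flag arguments are hypothetical, and we
 * assume uvm_mapent_reserve() returns 0 on success): a reservation
 * pre-allocates map entries so that a later operation that must not fail
 * can consume them via UMR_GETENTRY, e.g. when clipping:
 *
 *	struct uvm_mapent_reservation umr;
 *
 *	if (uvm_mapent_reserve(map, &umr, 2, 0) == 0) {
 *		... code that passes &umr to UVM_MAP_CLIP_START/END ...
 *		uvm_mapent_unreserve(map, &umr);
 *	}
 */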

struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)
#endif /* _KERNEL */
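
/*
 * Example (a sketch; "map" is a hypothetical, suitably locked vm_map):
 * vm_map_modflags() applies the set/clear masks under `flags_lock', so
 * setting and later clearing VM_MAP_WIREFUTURE looks like:
 *
 *	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
 *	...
 *	vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);
 */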

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t, struct uvm_mapent_reservation *);
MAP_INLINE
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
MAP_INLINE
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
MAP_INLINE
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, struct uvm_mapent_reservation *);

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);

int		uvm_mapent_reserve(struct vm_map *,
		    struct uvm_mapent_reservation *, int, int);
void		uvm_mapent_unreserve(struct vm_map *,
		    struct uvm_mapent_reservation *);


#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive, spin locks.  We simply
 * use the sleep lock's interlock for this.
 */
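
/*
 * Example (a sketch of the protocol described in the locking notes above,
 * not code from this file): a thread that must drop to a read lock around
 * a sleeping operation, and then recover the write lock, marks the map
 * busy so no other writer can slip in between the downgrade and the
 * upgrade:
 *
 *	vm_map_lock(map);		(write-lock)
 *	vm_map_busy(map);		(only legal while write-locked)
 *	vm_map_downgrade(map);		(now read-locked, still busy)
 *	... operation that may sleep ...
 *	vm_map_upgrade(map);		(safe: writers wait on busy flag)
 *	vm_map_unbusy(map);		(wakes VM_MAP_WANTLOCK waiters)
 *	vm_map_unlock(map);
 */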

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(struct vm_map *);
static __inline void		vm_map_lock(struct vm_map *);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	if (rv)
		map->timestamp++;

	return (rv);
}

static __inline void
vm_map_lock(struct vm_map *map)
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		simple_lock(&map->lock.lk_interlock);
		return;
	}

try_again:
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		KASSERT(error == ENOLCK);
		goto try_again;
	}

	map->timestamp++;
}

#ifdef DIAGNOSTIC
#define	vm_map_lock_read(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe Map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define	vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define	vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define	vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define	vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (/*CONSTCOND*/ 0)

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */