/*	$NetBSD: uvm_map.h,v 1.37 2003/11/01 11:09:02 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }
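
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * interface): a typical range operation looks up the entry containing
 * the start address and then clips it to the requested boundaries
 * before changing its attributes.  The helper name is hypothetical,
 * and the map is assumed to be write-locked by the caller.
 */
#if 0
static void
example_clip_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *entry;

	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);	/* split before start */
		UVM_MAP_CLIP_END(map, entry, end);	/* split at/after end */
		/* entry now begins at start and ends at or before end */
	}
}
#endif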

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */
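
/*
 * Illustrative sketch (editorial addition): extracting a range from a
 * source map into a destination map with uvm_map_extract() (prototype
 * below).  "srcmap", "dstmap", "srcva" and "len" are assumed
 * variables; this flag combination removes the old mapping and raises
 * protection to maxprot as entries are copied.
 */
#if 0
	vaddr_t dstva;
	int error;

	error = uvm_map_extract(srcmap, srcva, len, dstmap, &dstva,
	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_FIXPROT);
	if (error)
		return (error);
#endif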

#endif /* _KERNEL */

#include <sys/tree.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define UVM_MAP_STATIC		0x01		/* static map entry */
#define UVM_MAP_KMEM		0x02		/* from kmem entry pool */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  It is sometimes required to
 * downgrade an exclusive lock to a shared lock, and then upgrade
 * back to an exclusive lock again (to perform error recovery).
 * However, another thread *must not* queue itself to receive an
 * exclusive lock before we upgrade back to exclusive, otherwise
 * the error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check.  All flags which are r/w must be set or cleared while
 * the `flags_lock' is asserted.  Additional locking requirements
 * are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if the unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
#define	min_offset		header.end
#define max_offset		header.start
};
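
/*
 * Illustrative sketch (editorial addition): walking the sorted,
 * doubly-linked entry list of a map the caller has already locked.
 * "header" is a sentinel, so the walk terminates when it wraps back
 * around to &map->header.  The helper name is hypothetical.
 */
#if 0
static vsize_t
example_wired_bytes(struct vm_map *map)
{
	struct vm_map_entry *entry;
	vsize_t sum = 0;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		if (VM_MAPENT_ISWIRED(entry))
			sum += entry->end - entry->start;
	}
	return (sum);
}
#endif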

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08		/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#if (50 + (2 * NPROC) > 1000)
#define MAX_KMAPENT (50 + (2 * NPROC))
#else
#define	MAX_KMAPENT	1000	/* XXXCDC: no crash */
#endif
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)
#endif /* _KERNEL */
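
/*
 * Illustrative sketch (editorial addition): setting and clearing the
 * r/w flag VM_MAP_WIREFUTURE.  Per the locking notes above, the flag
 * may only be changed while the map is write-locked;
 * vm_map_modflags() itself takes only `flags_lock'.
 */
#if 0
	vm_map_lock(map);
	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	/* set */
	vm_map_unlock(map);

	vm_map_lock(map);
	vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);	/* clear */
	vm_map_unlock(map);
#endif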

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
MAP_INLINE
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
MAP_INLINE
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
MAP_INLINE
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **);
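
/*
 * Illustrative sketch (editorial addition): a minimal use of
 * uvm_map_lookup_entry(), which returns TRUE and sets *entry when the
 * address falls within an existing entry.  The helper name is
 * hypothetical.
 */
#if 0
static boolean_t
example_is_mapped(struct vm_map *map, vaddr_t va)
{
	struct vm_map_entry *entry;
	boolean_t rv;

	vm_map_lock_read(map);
	rv = uvm_map_lookup_entry(map, va, &entry);
	vm_map_unlock_read(map);
	return (rv);
}
#endif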

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 * These operations perform locking on the data portion of the
 * map.
 *
 * vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 * vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 * vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 * vm_map_unlock: release an exclusive lock on a map.
 *
 * vm_map_unlock_read: release a shared lock on a map.
 *
 * vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 * vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 * vm_map_busy: mark a map as busy.
 *
 * vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive spin locks; we simply
 * use the sleep lock's interlock for this.
 */

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(struct vm_map *);
static __inline void		vm_map_lock(struct vm_map *);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			/* busy maps must not be write-locked; fail early */
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	/* bump the timestamp only if the write lock was acquired */
	if (rv)
		map->timestamp++;

	return (rv);
}

static __inline void
vm_map_lock(struct vm_map *map)
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		/* intrsafe maps use only the spin interlock */
		simple_lock(&map->lock.lk_interlock);
		return;
	}

 try_again:
	/* wait for a busy map to become unbusy before taking the lock */
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		/* LK_SLEEPFAIL yields ENOLCK if we slept; recheck busy */
		KASSERT(error == ENOLCK);
		goto try_again;
	}

	map->timestamp++;
}
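
/*
 * Illustrative sketch (editorial addition): opportunistic locking with
 * vm_map_lock_try(), for callers that must not sleep.  On failure the
 * caller backs off instead of blocking on a busy or contended map.
 */
#if 0
	if (vm_map_lock_try(map)) {
		/* ... operate on the map ... */
		vm_map_unlock(map);
	} else {
		/* map busy or already locked; retry later */
	}
#endif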

#ifdef DIAGNOSTIC
#define	vm_map_lock_read(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define	vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define	vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define	vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define	vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (/*CONSTCOND*/ 0)
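
/*
 * Illustrative sketch (editorial addition) of the `busy' protocol from
 * the locking notes above: write-lock, mark busy, downgrade for a long
 * operation that may sleep, then upgrade, unbusy and release.  The
 * upgrade is safe because would-be writers wait for VM_MAP_BUSY to
 * clear before queueing for the exclusive lock.
 */
#if 0
	vm_map_lock(map);	/* exclusive */
	vm_map_busy(map);	/* hold off new writers */
	vm_map_downgrade(map);	/* now shared */

	/* ... long-running work that may sleep ... */

	vm_map_upgrade(map);	/* back to exclusive */
	vm_map_unbusy(map);	/* wakes VM_MAP_WANTLOCK waiters */
	vm_map_unlock(map);
#endif
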
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */