/*	$NetBSD: uvm_map.h,v 1.36 2003/10/01 22:50:15 enami Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }
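
/*
 * Illustrative sketch (not part of the original interface notes): a
 * typical caller looks up the entry containing the start address and
 * clips it to the operation's range before modifying it.  The names
 * "start" and "end" are hypothetical locals; the map must be
 * write-locked throughout.
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *		... modify only the [start, end) piece of "entry" ...
 *	}
 *	vm_map_unlock(map);
 */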

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */
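
/*
 * Hypothetical usage sketch: a caller moving a region into "dstmap",
 * keeping it contiguous and deleting it from "srcmap" as it goes,
 * might combine the flags like so (the variable names are
 * illustrative only; see the uvm_map_extract() prototype below):
 *
 *	error = uvm_map_extract(srcmap, start, len, dstmap, &dstaddr,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */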

#endif /* _KERNEL */

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	struct vm_map_entry *prev;	/* previous entry */
	struct vm_map_entry *next;	/* next entry */
	vaddr_t start;			/* start address */
	vaddr_t end;			/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map *sub_map;		/* belongs to another map */
	} object;			/* object I point to */
	voff_t offset;			/* offset into object */
	int etype;			/* entry type */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	int wired_count;		/* can be paged if == 0 */
	struct vm_aref aref;		/* anonymous overlay */
	int advice;			/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t flags;			/* flags */

#define UVM_MAP_STATIC		0x01	/* static map entry */
#define UVM_MAP_KMEM		0x02	/* from kmem entry pool */

};

#define	VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
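
/*
 * Illustrative sketch: entries form a circular, address-sorted,
 * doubly-linked list anchored at the map's "header" sentinel (see
 * struct vm_map below), so a forward walk over every entry of a
 * locked map "map" looks like:
 *
 *	struct vm_map_entry *entry;
 *
 *	for (entry = map->header.next; entry != &map->header;
 *	     entry = entry->next) {
 *		... entry covers [entry->start, entry->end) ...
 *	}
 */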

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  However,
 * another thread *must not* queue itself to receive an exclusive
 * lock before we upgrade back to exclusive; otherwise the
 * error recovery becomes extremely difficult, if not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if unspecified map
 *				allocations are to be arranged
 *				in a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
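	/*
	 * Note: the header sentinel doubles as the map's address
	 * range; its "end" holds the lowest mappable address and its
	 * "start" the highest, hence the inverted-looking aliases
	 * below.
	 */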
#define	min_offset		header.end
#define	max_offset		header.start
};

/* vm_map flags */
#define	VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define	VM_MAP_INTRSAFE		0x02	/* ro: interrupt safe map */
#define	VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define	VM_MAP_BUSY		0x08	/* rw: map is busy */
#define	VM_MAP_WANTLOCK		0x10	/* rw: want to write-lock */
#define	VM_MAP_DYING		0x20	/* rw: map is being destroyed */
#define	VM_MAP_TOPDOWN		0x40	/* ro: arrange map top-down */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#if (50 + (2 * NPROC) > 1000)
#define MAX_KMAPENT	(50 + (2 * NPROC))
#else
#define	MAX_KMAPENT	1000	/* XXXCDC: no crash */
#endif
#endif	/* !defined MAX_KMAPENT */

#ifdef _KERNEL
#define	vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)
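
/*
 * Hypothetical usage sketch: a caller that already holds the map
 * write-locked can flip r/w flags atomically, e.g. an
 * mlockall(MCL_FUTURE)-style path setting VM_MAP_WIREFUTURE and
 * clearing nothing:
 *
 *	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);
 */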
#endif /* _KERNEL */

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
MAP_INLINE
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
MAP_INLINE
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
MAP_INLINE
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **);

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive spin locks.  We simply
 * use the sleep lock's interlock for this.
 */

#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(struct vm_map *);
static __inline void		vm_map_lock(struct vm_map *);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	if (rv)
		map->timestamp++;

	return (rv);
}

static __inline void
vm_map_lock(struct vm_map *map)
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		simple_lock(&map->lock.lk_interlock);
		return;
	}

 try_again:
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		KASSERT(error == ENOLCK);
		goto try_again;
	}
	map->timestamp++;
}

#ifdef DIAGNOSTIC
#define	vm_map_lock_read(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define	vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define	vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define	vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (/*CONSTCOND*/ 0)
#else
#define	vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define	vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)

#define	vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (/*CONSTCOND*/ 0)
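
/*
 * Illustrative sketch of the protocol from the locking notes above:
 * a thread that must drop to a shared lock for a long operation, but
 * may need exclusive access again for error recovery, marks the map
 * busy first so that no other thread can queue for the write lock in
 * the meantime ("need_recovery" is a hypothetical condition):
 *
 *	vm_map_lock(map);		(exclusive)
 *	vm_map_busy(map);		(stall would-be writers)
 *	vm_map_downgrade(map);		(now shared)
 *	... long-running operation ...
 *	if (need_recovery) {
 *		vm_map_upgrade(map);	(exclusive again)
 *		... recover ...
 *		vm_map_downgrade(map);
 *	}
 *	vm_map_unbusy(map);		(wakes VM_MAP_WANTLOCK waiters)
 *	vm_map_unlock_read(map);
 */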
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define	vm_map_min(map)		((map)->min_offset)
#define	vm_map_max(map)		((map)->max_offset)
#define	vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */