/*	$NetBSD: uvm_map.h,v 1.80 2020/05/26 00:50:53 kamil Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h    8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_start(MAP,ENTRY,VA); \
	} \
}

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
		uvm_map_clip_end(MAP,ENTRY,VA); \
	} \
}

/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x01	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x02	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x04	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x08	/* set prot to maxprot as we go */
#define UVM_EXTRACT_RESERVED	0x10	/* caller did uvm_map_reserve() */
#define UVM_EXTRACT_PROT_ALL	0x20	/* set prot to UVM_PROT_ALL */

#endif /* _KERNEL */
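/*
 * Illustrative sketch (not compiled): the usual pattern for operating on
 * an address range is to look up the first entry, clip it at the range
 * boundaries, and walk the entry list forward.  The function name and
 * locals here are hypothetical; see uvm_map.c for the real callers of
 * the clip macros above.
 */
#if 0
static void
example_clip_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *entry;

	vm_map_lock(map);		/* clip macros require a locked map */
	if (uvm_map_lookup_entry(map, start, &entry)) {
		UVM_MAP_CLIP_START(map, entry, start);
		/* entry now begins at or after "start" */
		for (; entry != &map->header && entry->start < end;
		     entry = entry->next) {
			UVM_MAP_CLIP_END(map, entry, end);
			/* operate on [entry->start, entry->end) here */
		}
	}
	vm_map_unlock(map);
}
#endif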

#include <sys/rbtree.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 *
 * At runtime this is aligned on a cacheline boundary, with fields
 * used during fault processing to do RB tree lookup clustered at
 * the beginning.
 */
struct vm_map_entry {
	struct rb_node		rb_node;	/* tree information */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	vsize_t			gap;		/* free space after */
	vsize_t			maxgap;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	uint8_t			etype;		/* entry type */
	uint8_t			flags;		/* flags */
	uint8_t			advice;		/* madvise advice */
	uint8_t			unused;		/* unused */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
};

/* flags */
#define UVM_MAP_KERNEL		0x01	/* kernel map entry */
#define UVM_MAP_STATIC		0x04	/* special static entries */
#define UVM_MAP_NOMERGE		0x08	/* this entry is not mergeable */

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)
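/*
 * Illustrative sketch (not compiled): the gap/maxgap fields cache
 * free-space information in the RB tree so a free-space search (see
 * uvm_map_findspace() below) can reject whole subtrees at once.
 * Roughly, the invariants are as below; this helper is hypothetical,
 * and the authoritative maintenance code lives in uvm_map.c.
 */
#if 0
static void
example_gap_invariants(struct vm_map_entry *e)
{
	/* "gap" is the unallocated space between this entry and the next
	   (the list is terminated by the map's header sentinel). */
	KASSERT(e->gap == e->next->start - e->end);

	/* "maxgap" is the largest gap anywhere in this entry's subtree,
	   so a search can skip a subtree whose maxgap is too small. */
	KASSERT(e->maxgap >= e->gap);
}
#endif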

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  While the
 * lock is downgraded, another thread *must not* queue itself to
 * receive an exclusive lock before we upgrade back to exclusive,
 * otherwise the error recovery becomes extremely difficult, if
 * not impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check those flags.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if the unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	krwlock_t		lock;		/* Non-intrsafe lock */
	struct lwp *		busy;		/* LWP holding map busy */
	kmutex_t		misc_lock;	/* Lock for cv, busy */
	kcondvar_t		cv;		/* For signalling */
	int			flags;		/* flags */
	struct rb_tree		rb_tree;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	volatile int		ref_count;	/* Reference count */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct vm_map_entry *	first_free;	/* First free space hint */
	unsigned int		timestamp;	/* Version number */
};

#if defined(_KERNEL)

#include <sys/callback.h>

#endif /* defined(_KERNEL) */

#define VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())

/* vm_map flags */
#define VM_MAP_PAGEABLE		0x01	/* ro: entries are pageable */
#define VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
#define VM_MAP_DYING		0x20	/* rw: map is being destroyed */
#define VM_MAP_TOPDOWN		0x40	/* ro: arrange map top-down */
#define VM_MAP_WANTVA		0x100	/* rw: want va */

#define VM_MAP_BITS	"\177\020\
b\0PAGEABLE\0\
b\2WIREFUTURE\0\
b\5DYING\0\
b\6TOPDOWN\0\
b\10WANTVA\0"

#ifdef _KERNEL
struct uvm_map_args {
	struct vm_map_entry *uma_prev;

	vaddr_t uma_start;
	vsize_t uma_size;

	struct uvm_object *uma_uobj;
	voff_t uma_uoffset;

	uvm_flag_t uma_flags;
};
#endif /* _KERNEL */
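/*
 * Illustrative sketch (not compiled) of the `busy' protocol described
 * above: mark the map busy while holding the exclusive lock, drop the
 * lock for a long-running operation (writers will wait for busy to
 * clear), then reacquire the lock and clear busy.  The function name is
 * hypothetical; for real call sites, see uvm_map.c (e.g. the wiring
 * code around uvm_fault_wire()).
 */
#if 0
static void
example_busy_protocol(struct vm_map *map)
{
	vm_map_lock(map);	/* exclusive */
	vm_map_busy(map);	/* new writers now wait for unbusy */
	vm_map_unlock(map);

	/* ... long-running work, e.g. I/O, with the map unlocked ... */

	vm_map_lock(map);
	vm_map_unbusy(map);	/* wakes threads waiting on map->cv */
	vm_map_unlock(map);
}
#endif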

/*
 * globals:
 */

#ifdef _KERNEL

#include <sys/proc.h>

#ifdef PMAP_GROWKERNEL
extern vaddr_t	uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
void		uvm_map_init_caches(void);
bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
void		uvm_map_reference(struct vm_map *);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *, uvm_flag_t);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
#define uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **, int);

int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
		    struct uvm_map_args *);
int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
		    struct vm_map_entry *);

int		uvm_mapent_trymerge(struct vm_map *,
		    struct vm_map_entry *, int);
#define UVM_MERGE_COPYING	1

/*
 * VM map locking operations.
 */

bool		vm_map_lock_try(struct vm_map *);
void		vm_map_lock(struct vm_map *);
void		vm_map_unlock(struct vm_map *);
void		vm_map_unbusy(struct vm_map *);
void		vm_map_lock_read(struct vm_map *);
void		vm_map_unlock_read(struct vm_map *);
void		vm_map_busy(struct vm_map *);
bool		vm_map_locked_p(struct vm_map *);

void		uvm_map_lock_entry(struct vm_map_entry *, krw_t);
void		uvm_map_unlock_entry(struct vm_map_entry *);

#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->header.end)
#define		vm_map_max(map)		((map)->header.start)
#define		vm_map_setmin(map, v)	((map)->header.end = (v))
#define		vm_map_setmax(map, v)	((map)->header.start = (v))

#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */