Lines Matching refs:MAP
81 * => map must be locked by caller
84 #define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
86 uvm_map_clip_start(MAP,ENTRY,VA); \
94 * => map must be locked by caller
97 #define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
99 uvm_map_clip_end(MAP,ENTRY,VA); \
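
Taken together, the two clip macros let a caller narrow a map entry to an
arbitrary sub-range before changing just that piece. A minimal sketch,
assuming the caller already holds the map write-locked and has looked up
`entry'; everything but the macros themselves is illustrative:

	UVM_MAP_CLIP_START(map, entry, start);	/* split off [entry->start, start) */
	UVM_MAP_CLIP_END(map, entry, end);	/* split off [end, entry->end) */
	/* entry now covers exactly [start, end) and can be modified alone */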
106 #define UVM_EXTRACT_REMOVE 0x01 /* remove mapping from old map */
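
UVM_EXTRACT_REMOVE turns an extract into a move: the source range is mapped
into the destination map and then unmapped from the source. A hedged sketch;
the uvm_map_extract() prototype shown follows the NetBSD sources but may
differ between releases:

	vaddr_t dstaddr;
	int error;

	/* move [srcaddr, srcaddr + len) from srcmap into dstmap */
	error = uvm_map_extract(srcmap, srcaddr, len, dstmap, &dstaddr,
	    UVM_EXTRACT_REMOVE);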
124 * Address map entries consist of start and end addresses,
125 * a VM object (or sharing map) and offset into that object,
143 struct vm_map *sub_map; /* belongs to another map */
158 #define UVM_MAP_KERNEL 0x01 /* kernel map entry */
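
Since the object field is a union, an entry references either a VM object or
a submap, never both. A sketch of the usual dispatch; the NetBSD-style
UVM_ET_ISSUBMAP() entry-type test is assumed here:

	if (UVM_ET_ISSUBMAP(entry))		/* etype test, assumed name */
		map = entry->object.sub_map;	/* descend into the submap */
	else
		uobj = entry->object.uvm_obj;	/* range backed by a VM object */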
165 * Maps are doubly-linked lists of map entries, kept sorted
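
The header entry doubles as the sentinel of that circular list, so a walk of
the sorted entries ends when it wraps back around to it. A minimal sketch,
assuming the caller holds the map locked:

	struct vm_map_entry *entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next)
		printf("entry [%#lx, %#lx)\n", (unsigned long)entry->start,
		    (unsigned long)entry->end);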
173 * VM map locking is a little complicated. There are both shared
182 * a `busy' map. A `busy' map is read-locked, but other threads
184 * entering the lock manager. A map may only be marked busy
185 * when the map is write-locked (and then the map must be downgraded
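
In code, the protocol just described is: write-lock, mark busy, downgrade to
a read lock, do the fault-sensitive work, then unbusy and unlock. A hedged
sketch; vm_map_busy(), vm_map_downgrade() and friends follow the NetBSD
sources of this era, but treat the exact entry points as assumptions:

	vm_map_lock(map);		/* exclusive */
	vm_map_busy(map);		/* new write-lockers now wait for unbusy */
	vm_map_downgrade(map);		/* busy maps stay read-locked, per above */
	/* ... work that must not interleave with writers ... */
	vm_map_unbusy(map);
	vm_map_unlock_read(map);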
190 * Access to the map `flags' member is controlled by the `flags_lock'
191 * simple lock. Note that some flags are static (set once at map
200 * map is write-locked. may be tested
208 * created if the unspecified map
213 struct pmap * pmap; /* Physical map */
215 struct lwp * busy; /* LWP holding map busy */
235 #define VM_MAP_IS_KERNEL(map) (vm_map_pmap(map) == pmap_kernel())
240 #define VM_MAP_DYING 0x20 /* rw: map is being destroyed */
241 #define VM_MAP_TOPDOWN 0x40 /* ro: arrange map top-down */
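
Flags marked `ro' are static, set at creation and never changed, so they can
be tested without taking flags_lock; `rw' flags need the lock. For example, a
hypothetical allocator might pick its search direction from VM_MAP_TOPDOWN:

	if ((map->flags & VM_MAP_TOPDOWN) != 0)
		hint = vm_map_max(map);	/* search downward from the top */
	else
		hint = vm_map_min(map);	/* search upward from the bottom */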
308 #define uvm_unmap(map, s, e) uvm_unmap1((map), (s), (e), 0)
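
So uvm_unmap() is simply uvm_unmap1() with no flags, the common case. A usage
sketch, removing `size' bytes of kernel mappings at `va' (kernel_map chosen
only for illustration):

	uvm_unmap(kernel_map, va, va + size);	/* expands to uvm_unmap1(..., 0) */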
324 * VM map locking operations.
344 #define vm_map_min(map) ((map)->header.end)
345 #define vm_map_max(map) ((map)->header.start)
346 #define vm_map_setmin(map, v) ((map)->header.end = (v))
347 #define vm_map_setmax(map, v) ((map)->header.start = (v))
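
The apparent start/end inversion above is deliberate: the header is a
sentinel entry that closes the circular entry list, so its `end' field holds
the map's lowest address and its `start' field the highest, which lets plain
list traversals work at both edges. A hedged bounds-check sketch using the
accessors:

	if (start < vm_map_min(map) || end > vm_map_max(map))
		return EINVAL;		/* range lies outside the map's VA space */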
349 #define vm_map_pmap(map) ((map)->pmap)