/*	$NetBSD: uvm_amap.h,v 1.41 2020/03/20 19:08:54 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _UVM_UVM_AMAP_H_
#define _UVM_UVM_AMAP_H_

/*
 * uvm_amap.h: general amap interface and amap implementation-specific info
 */

/*
 * an amap structure contains pointers to a set of anons that are
 * mapped together in virtual memory (an anon is a single page of
 * anonymous virtual memory -- see uvm_anon.h).  in uvm we hide the
 * details of the implementation of amaps behind a general amap
 * interface.  this allows us to change the amap implementation
 * without having to touch the rest of the code.  this file is divided
 * into two parts: the definition of the uvm amap interface and the
 * amap implementation-specific definitions.
 */

#ifdef _KERNEL

/*
 * part 1: amap interface
 */

void uvm_amap_init(void);

/*
 * forward definition of vm_amap structure.  only amap
 * implementation-specific code should directly access the fields of
 * this structure.
 */

struct vm_amap;

/*
 * prototypes for the amap interface
 */

void		amap_add	/* add an anon to an amap */
		    (struct vm_aref *, vaddr_t,
		     struct vm_anon *, bool);
struct vm_amap	*amap_alloc	/* allocate a new amap */
		    (vaddr_t, vaddr_t, int);
void		amap_copy	/* clear amap needs-copy flag */
		    (struct vm_map *, struct vm_map_entry *, int,
		     vaddr_t, vaddr_t);
void		amap_cow_now	/* resolve all COW faults now */
		    (struct vm_map *, struct vm_map_entry *);
int		amap_extend	/* make amap larger */
		    (struct vm_map_entry *, vsize_t, int);
int		amap_flags	/* get amap's flags */
		    (struct vm_amap *);
void		amap_free	/* free amap */
		    (struct vm_amap *);
void		amap_lock	/* lock amap */
		    (struct vm_amap *);
struct vm_anon	*amap_lookup	/* lookup an anon @ offset in amap */
		    (struct vm_aref *, vaddr_t);
void		amap_lookups	/* lookup multiple anons */
		    (struct vm_aref *, vaddr_t,
		     struct vm_anon **, int);
void		amap_ref	/* add a reference to an amap */
		    (struct vm_amap *, vaddr_t, vsize_t, int);
int		amap_refs	/* get number of references of amap */
		    (struct vm_amap *);
void		amap_share_protect /* protect pages in a shared amap */
		    (struct vm_map_entry *, vm_prot_t);
void		amap_splitref	/* split reference to amap into two */
		    (struct vm_aref *, struct vm_aref *, vaddr_t);
void		amap_unadd	/* remove an anon from an amap */
		    (struct vm_aref *, vaddr_t);
void		amap_unlock	/* unlock amap */
		    (struct vm_amap *);
void		amap_unref	/* drop reference to an amap */
		    (struct vm_amap *, vaddr_t, vsize_t, bool);
void		amap_wipeout	/* remove all anons from amap */
		    (struct vm_amap *);
bool		amap_swap_off
		    (int, int);

/*
 * amap flag values
 */

#define AMAP_SHARED	0x1	/* amap is shared */
#define AMAP_REFALL	0x2	/* amap_ref: reference entire amap */
#define AMAP_SWAPOFF	0x4	/* amap_swap_off() is in progress */

/*
 * amap_copy flags
 */

#define AMAP_COPY_NOWAIT	0x02	/* not allowed to sleep */
#define AMAP_COPY_NOCHUNK	0x04	/* not allowed to chunk */
#define AMAP_COPY_NOMERGE	0x08	/* not allowed to merge */

/*
 * amap_extend flags
 */
#define AMAP_EXTEND_BACKWARDS	0x00	/* add "size" to start of map */
#define AMAP_EXTEND_FORWARDS	0x01	/* add "size" to end of map */
#define AMAP_EXTEND_NOWAIT	0x02	/* not allowed to sleep */
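
/*
 * an illustrative sketch of typical use of the interface above (an
 * assumption based on this header, not compiled into the kernel, and
 * not part of the interface): looking up the anon backing an address
 * within a map entry.  the helper name example_amap_lookup is
 * hypothetical; see the real callers in uvm_fault.c.
 */
#if 0
static struct vm_anon *
example_amap_lookup(struct vm_map_entry *entry, vaddr_t va)
{
	struct vm_amap *amap = entry->aref.ar_amap; /* assumes entry has an amap */
	struct vm_anon *anon;

	/* offsets passed to amap_lookup() are relative to the entry's start */
	amap_lock(amap, RW_READER);
	anon = amap_lookup(&entry->aref, va - entry->start);
	/* real code keeps the amap locked for as long as it uses the anon */
	amap_unlock(amap);
	return anon;
}
#endif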
#endif /* _KERNEL */

/**********************************************************************/

/*
 * part 2: amap implementation-specific info
 */

/*
 * we currently provide an array-based amap implementation.  in this
 * implementation we provide the option of tracking split references
 * so that we don't lose track of references during partial unmaps
 * ... this is enabled with the "UVM_AMAP_PPREF" define.
 */

#define UVM_AMAP_PPREF		/* track partial references */

/*
 * here is the definition of the vm_amap structure for this implementation.
 */

struct vm_amap {
	krwlock_t *am_lock;	/* lock [locks all vm_amap fields] */
	int am_ref;		/* reference count */
	int am_flags;		/* flags */
	int am_maxslot;		/* max # of slots allocated */
	int am_nslot;		/* # of slots currently in map ( <= maxslot) */
	int am_nused;		/* # of slots currently in use */
	int *am_slots;		/* contig array of active slots */
	int *am_bckptr;		/* back pointer array to am_slots */
	struct vm_anon **am_anon; /* array of anonymous pages */
#ifdef UVM_AMAP_PPREF
	int *am_ppref;		/* per page reference count (if !NULL) */
#endif
	LIST_ENTRY(vm_amap) am_list;
};

/*
 * note that am_slots, am_bckptr, and am_anon are arrays.  this allows
 * fast lookup of pages based on their virtual address at the expense of
 * some extra memory.  in the future we should be smarter about memory
 * usage and fall back to a non-array based implementation on systems
 * that are short of memory (XXXCDC).
 *
 * the entries in the array are called slots... for example an amap that
 * covers four pages of virtual memory is said to have four slots.  here
 * is an example of the array usage for a four slot amap.  note that only
 * slots one and three have anons assigned to them.  "D/C" means that we
 * "don't care" about the value.
 *
 *            0     1      2     3
 * am_anon:   NULL, anon0, NULL, anon1	(actual pointers to anons)
 * am_bckptr: D/C,  1,     D/C,  0	(points to am_slots entry)
 *
 * am_slots:  3, 1, D/C, D/C		(says slots 3 and 1 are in use)
 *
 * note that am_bckptr is D/C if the slot in am_anon is set to NULL.
 * to find the entry in am_slots for an anon, look at am_bckptr[slot],
 * thus the entry for slot 3 in am_slots[] is at am_slots[am_bckptr[3]].
 * in general, if am_anon[X] is non-NULL, then the following must be
 * true: am_slots[am_bckptr[X]] == X
 *
 * note that am_slots is always contig-packed.
 */
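
/*
 * a minimal sketch (an assumption drawn from the invariant above, not
 * compiled into the kernel) of the bookkeeping it implies: installing an
 * anon in "slot" appends the slot to the packed am_slots[] array and
 * records its position in am_bckptr[], so am_slots[am_bckptr[slot]] == slot
 * holds afterwards.  the helper name is hypothetical; see amap_add() in
 * uvm_amap.c for the real code.
 */
#if 0
static void
example_install_anon(struct vm_amap *amap, int slot, struct vm_anon *anon)
{
	KASSERT(amap->am_anon[slot] == NULL);
	amap->am_bckptr[slot] = amap->am_nused;	/* where this slot sits in am_slots */
	amap->am_slots[amap->am_nused] = slot;	/* append to the packed array */
	amap->am_nused++;
	amap->am_anon[slot] = anon;
}
#endif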
/*
 * defines for handling of large, sparse amaps:
 *
 * one of the problems of array-based amaps is that if you allocate a
 * large, sparsely-used area of virtual memory you end up allocating
 * large arrays that, for the most part, don't get used.  this is a
 * problem for BSD in that the kernel likes to make these types of
 * allocations to "reserve" memory for possible future use.
 *
 * for example, the kernel allocates (reserves) a large chunk of user
 * VM for possible stack growth.  most of the time only a page or two
 * of this VM is actually used.  since the stack is anonymous memory
 * it makes sense for it to live in an amap, but if we allocated an
 * amap for the entire stack range we could end up wasting a large
 * amount of allocated KVM.
 *
 * for example, on the i386 at boot time we allocate two amaps for the stack
 * of /sbin/init:
 *  1. a 7680 slot amap at protection 0 (reserve space for stack)
 *  2. a 512 slot amap at protection 7 (top of stack)
 *
 * most of the array allocated for the amaps for this is never used.
 * the amap interface provides a way for us to avoid this problem by
 * allowing amap_copy() to break larger amaps up into smaller sized
 * chunks (controlled by the "canchunk" option).  we use this feature
 * to reduce our memory usage with the BSD stack management.  if we
 * are asked to create an amap with more than UVM_AMAP_LARGE slots in it,
 * we attempt to break it up into a UVM_AMAP_CHUNK sized amap if the
 * "canchunk" flag is set.
 *
 * so, in the i386 example, the 7680 slot area is never referenced so
 * nothing gets allocated (amap_copy is never called because the protection
 * is zero).  the 512 slot area for the top of the stack is referenced.
 * the chunking code breaks it up into 16 slot chunks (hopefully a single
 * 16 slot chunk is enough to handle the whole stack).
 */

#define UVM_AMAP_LARGE	256	/* # of slots in "large" amap */
#define UVM_AMAP_CHUNK	16	/* # of slots to chunk large amaps in */

#ifdef _KERNEL

/*
 * macros
 */

/* AMAP_B2SLOT: convert byte offset to slot */
#define AMAP_B2SLOT(S,B)	{					\
	KASSERT(((B) & (PAGE_SIZE - 1)) == 0);				\
	(S) = (B) >> PAGE_SHIFT;					\
}

/*
 * lock/unlock/refs/flags macros
 */

#define amap_flags(AMAP)	((AMAP)->am_flags)
#define amap_lock(AMAP, OP)	rw_enter((AMAP)->am_lock, (OP))
#define amap_lock_try(AMAP, OP)	rw_tryenter((AMAP)->am_lock, (OP))
#define amap_refs(AMAP)		((AMAP)->am_ref)
#define amap_unlock(AMAP)	rw_exit((AMAP)->am_lock)

/*
 * if we enable PPREF, then we have a couple of extra functions that
 * we need to prototype here...
 */

#ifdef UVM_AMAP_PPREF

#define PPREF_NONE ((int *) -1)	/* not using ppref */

void		amap_pp_adjref		/* adjust references */
		    (struct vm_amap *, int, vsize_t, int);
void		amap_pp_establish	/* establish ppref */
		    (struct vm_amap *, vaddr_t);
void		amap_wiperange		/* wipe part of an amap */
		    (struct vm_amap *, int, int);
#endif	/* UVM_AMAP_PPREF */

#endif /* _KERNEL */

#endif /* _UVM_UVM_AMAP_H_ */