/*	$NetBSD: uvm_km.c,v 1.166 2024/12/07 23:19:07 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_kern.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for a submap's kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  The pool(9), unless created with a custom
 *	meta-data allocator, and kmem(9) subsystems use this arena.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use quantum cache.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with the pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 */
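
/*
 * Illustrative sketch (hypothetical, not compiled): the kernel-object
 * offset relation described in the example above.  A kernel virtual
 * address maps to a kernel_object offset by subtracting
 * vm_map_min(kernel_map), i.e. VM_MIN_KERNEL_ADDRESS.  The values below
 * are the ones used in the comment; a real lookup would also need the
 * object's vmobjlock held.
 */
#if 0
	vaddr_t va = 0xf8235000;			/* from uvm_km_alloc() */
	voff_t off = va - vm_map_min(kernel_map);	/* 0x235000 with a 0xf8000000 base */
	struct vm_page *pg = uvm_pagelookup(uvm_kernel_object, off);
#endif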
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.166 2024/12/07 23:19:07 chs Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for lower and upper-bounds for the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

size_t nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
static struct vmem kmem_va_arena_store;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	size_t npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(NKMEMPAGES_MAX_UNLIMITED) && !defined(KMSAN)
	/*
	 * The extra 1/9 here is to account for uvm_km_va_starved_p()
	 * wanting to keep 10% of kmem virtual space free.
	 * The intent is that on "unlimited" platforms we should be able
	 * to allocate all of physical memory as kmem without behaving
	 * as though we are running short of kmem virtual space.
	 */
	npages = (physmem * 10) / 9;
#else

#if defined(KMSAN)
	npages = (physmem / 4);
#elif defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#if !defined(NKMEMPAGES_MAX_UNLIMITED)
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
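
/*
 * Worked example (hypothetical numbers): with 4 KiB pages,
 * physmem == 262144 (1 GiB of RAM), PMAP_MAP_POOLPAGE defined and
 * NKMEMPAGES_MAX_UNLIMITED not defined, the calculation above yields
 * nkmempages = 262144 / 4 = 65536, i.e. a 256 MiB kmem_arena before
 * clamping to [NKMEMPAGES_MIN, NKMEMPAGES_MAX].  That is well above the
 * 64 MiB threshold that uvm_km_bootstrap() uses to decide whether to
 * shrink the kmem_va_arena quantum cache.
 */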
/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "start=%#jx end=%#jx", start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();

	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
	/*
	 * kmem_arena VA allocations happen independently of uvm_map.
	 * grow kernel to accommodate the kmem_arena.
	 */
	if (uvm_maxkaddr < kmembase + kmemsize) {
		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
		    uvm_maxkaddr, kmembase, kmemsize);
	}
#endif

	vmem_subsystem_init(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx",
	    kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
	    VM_NOSLEEP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_km_init: init the kernel maps virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{
	kmem_init();
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug in it...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
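
/*
 * Minimal usage sketch (hypothetical, not compiled): how a boot-time
 * subsystem might carve its own submap out of kernel_map, in the style
 * of pager_map/exec_map.  "foo_map", "foo_mapsize" and foo_init() are
 * made up for illustration; passing NULL as the last argument makes
 * uvm_km_suballoc() allocate the struct vm_map itself.
 */
#if 0
static struct vm_map *foo_map;

static void
foo_init(vsize_t foo_mapsize)
{
	vaddr_t fmin, fmax;

	foo_map = uvm_km_suballoc(kernel_map, &fmin, &fmax, foo_mapsize,
	    VM_MAP_PAGEABLE, false, NULL);
}
#endif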
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "km_pgrm");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_pagefree(pg);
		}
	}
	rw_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
	struct vm_page *pg;
	paddr_t pa[__PGRM_BATCH];
	int npgrm, i;
	vaddr_t va, batch_vastart;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERTMSG(vm_map_min(map) <= start,
	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
	    " (size=%#"PRIxVSIZE")",
	    vm_map_min(map), start, end - start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (va = start; va < end;) {
		batch_vastart = va;
		/* create a batch of at most __PGRM_BATCH pages to free */
		for (i = 0;
		     i < __PGRM_BATCH && va < end;
		     va += PAGE_SIZE) {
			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
				continue;
			}
			i++;
		}
		npgrm = i;
		/* now remove the mappings */
		pmap_kremove(batch_vastart, va - batch_vastart);
		/* and free the pages */
		for (i = 0; i < npgrm; i++) {
			pg = PHYS_TO_VM_PAGE(pa[i]);
			KASSERT(pg);
			KASSERT(pg->uobject == NULL);
			KASSERT(pg->uanon == NULL);
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
	}
#undef __PGRM_BATCH
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	vaddr_t va;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		paddr_t pa;

		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa %#llx",
			    (void *)va, (long long)pa);
		}
		/*
		 * kernel_object should not have pages for the corresponding
		 * region.  check it.
		 *
		 * why trylock?  because:
		 * - caller might not want to block.
		 * - we can recurse when allocating radix_node for
		 *   kernel_object.
		 */
		if (rw_tryenter(uvm_kernel_object->vmobjlock, RW_READER)) {
			struct vm_page *pg;

			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			rw_exit(uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p",
				    (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot, vaprot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%#jx)",
	    (uintptr_t)map, (uintptr_t)obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	     | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=%#jx)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, " kva=%#jx, offset=%#jx", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	if ((flags & UVM_KMF_ZERO) == 0) {
		kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR);
		kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT);
	}

	UVMHIST_LOG(maphist,"<- done (kva=%#jx)", kva,0,0,0);
	return(kva);
}
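
/*
 * Minimal usage sketch (hypothetical, not compiled): allocating and later
 * releasing a wired, zeroed page with uvm_km_alloc()/uvm_km_free().
 * The UVM_KMF_* type flag passed to uvm_km_free() must match the one used
 * for the allocation; per the note above, the allocation can still return
 * 0 if no virtual space is available.
 */
#if 0
	vaddr_t va;

	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
	if (va == 0)
		return ENOMEM;
	/* ... use the page at va ... */
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
#endif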
/*
 * uvm_km_protect: change the protection of an allocated area
 */

int
uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
{
	return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus
		 * remove it after.  See comment below about KVA visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

#if defined(PMAP_ALLOC_POOLPAGE) && \
    !defined(PMAP_MAP_POOLPAGE) && !defined(PMAP_UNMAP_POOLPAGE)
#error Must specify ALLOC with MAP and UNMAP
#endif

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		    0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		    (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		KASSERT(va != 0);
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

#ifdef PMAP_GROWKERNEL
	/*
	 * These VA allocations happen independently of uvm_map
	 * so this allocation must not extend beyond the current limit.
	 */
	KASSERTMSG(uvm_maxkaddr >= va + size,
	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
	    uvm_maxkaddr, va, size);
#endif

	loopva = va;
	loopsize = size;

	while (loopsize) {
		paddr_t pa __diagused;
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
		    " pa=%#"PRIxPADDR" vmem=%p",
		    loopva, loopsize, pa, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}

bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	if (kmem_arena == NULL)
		return false;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}
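
/*
 * Usage sketch (hypothetical, not compiled): obtaining a page of wired,
 * mapped kernel memory through the kmem_va_arena quantum cache with
 * uvm_km_kmem_alloc(), and returning it with uvm_km_kmem_free().  This is
 * roughly what a pool/kmem style page backend does; error handling is
 * minimal, and the uvm_km_va_starved_p() check at the end is just one way
 * a caller that caches memory might decide to release its caches.
 */
#if 0
	vmem_addr_t va;

	if (uvm_km_kmem_alloc(kmem_va_arena, PAGE_SIZE,
	    VM_SLEEP | VM_INSTANTFIT, &va) != 0)
		panic("could not allocate a backing page");
	/* ... use the memory at (void *)va ... */
	uvm_km_kmem_free(kmem_va_arena, va, PAGE_SIZE);

	if (uvm_km_va_starved_p()) {
		/* kmem VA is nearly exhausted; release cached objects */
	}
#endif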